From commits-noreply at bitbucket.org Fri Jan 1 20:37:26 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 1 Jan 2010 19:37:26 +0000 (UTC) Subject: [py-svn] py-trunk commit f2b15f840071: run py.* tools through "-c import py ; py.cmdline.py*" by default Message-ID: <20100101193726.358137EE85@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262374618 -3600 # Node ID f2b15f840071de34272e3c3dac4bc678c1a54a56 # Parent 3b9141d6bd7df061b8f4bace364d1ea66fb0fe16 run py.* tools through "-c import py ; py.cmdline.py*" by default and introduce --tools-on-path to force discovery of tools from PATH --- a/py/plugin/pytest_pytester.py +++ b/py/plugin/pytest_pytester.py @@ -11,6 +11,13 @@ from py.impl.test.config import Config a from py.plugin import hookspec from py.builtin import print_ +def pytest_addoption(parser): + group = parser.getgroup("pylib") + group.addoption('--tools-on-path', + action="store_true", dest="toolsonpath", default=False, + help=("discover tools on PATH instead of going through py.cmdline.") + ) + pytest_plugins = '_pytest' def pytest_funcarg__linecomp(request): @@ -307,8 +314,15 @@ class TmpTestdir: return self.run(*fullargs) def _getpybinargs(self, scriptname): - script = py.path.local.sysfind(scriptname) - return script, + if self.request.config.getvalue("toolsonpath"): + script = py.path.local.sysfind(scriptname) + assert script, "script %r not found" % scriptname + return (script,) + else: + cmdlinename = scriptname.replace(".", "") + assert hasattr(py.cmdline, cmdlinename), cmdlinename + source = "import py ; py.cmdline.%s()" % cmdlinename + return (sys.executable, "-c", source,) def runpython(self, script): return self.run(py.std.sys.executable, script) From commits-noreply at bitbucket.org Fri Jan 1 21:07:33 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 1 Jan 2010 20:07:33 +0000 (UTC) Subject: [py-svn] py-trunk commit ff1a6628cd01: fix some failures introduced by the last commit, document new "pytestconfig" funcarg Message-ID: <20100101200733.3A6EC7EE85@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262376213 -3600 # Node ID ff1a6628cd01adb21a1992e737860aae23b88ba1 # Parent f2b15f840071de34272e3c3dac4bc678c1a54a56 fix some failures introduced by the last commit, document new "pytestconfig" funcarg --- a/py/plugin/pytest_default.py +++ b/py/plugin/pytest_default.py @@ -29,6 +29,7 @@ def pytest_collect_file(path, parent): return parent.Module(path, parent=parent) def pytest_funcarg__pytestconfig(request): + """ the pytest config object with access to command line opts.""" return request.config def pytest_collect_directory(path, parent): --- a/testing/cmdline/test_cmdline.py +++ b/testing/cmdline/test_cmdline.py @@ -3,14 +3,13 @@ import sys, py pytest_plugins = "pytest_pytester" @py.test.mark.multi(name=[x for x in dir(py.cmdline) if x[0] != "_"]) -def test_cmdmain(name): +def test_cmdmain(name, pytestconfig): main = getattr(py.cmdline, name) assert py.builtin.callable(main) assert name[:2] == "py" - scriptname = "py." + name[2:] - if sys.platform == "win32": - scriptname += ".exe" - assert py.path.local.sysfind(scriptname), scriptname + if pytestconfig.getvalue("toolsonpath"): + scriptname = "py." 
+ name[2:] + assert py.path.local.sysfind(scriptname), scriptname class TestPyLookup: def test_basic(self, testdir): --- a/py/plugin/pytest_pytester.py +++ b/py/plugin/pytest_pytester.py @@ -338,8 +338,10 @@ class TmpTestdir: def spawn_pytest(self, string, expect_timeout=10.0): pexpect = py.test.importorskip("pexpect", "2.4") + if not self.request.config.getvalue("toolsonpath"): + py.test.skip("need --tools-on-path to run py.test script") basetemp = self.tmpdir.mkdir("pexpect") - invoke = "%s %s" % self._getpybinargs("py.test") + invoke = self._getpybinargs("py.test")[0] cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) child = pexpect.spawn(cmd, logfile=basetemp.join("spawn.out").open("w")) child.timeout = expect_timeout --- a/CHANGELOG +++ b/CHANGELOG @@ -13,6 +13,9 @@ Changes between 1.X and 1.1.1 - new option: --confcutdir=dir will make py.test only consider conftest files that are relative to the specified dir. +- new funcarg: "pytestconfig" is the pytest config object for access + to command line args and can now be easily used in a test. + - install 'py.test' and `py.which` with a ``-$VERSION`` suffix to disambiguate between Python3, python2.X, Jython and PyPy installed versions. From commits-noreply at bitbucket.org Fri Jan 1 21:54:50 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 1 Jan 2010 20:54:50 +0000 (UTC) Subject: [py-svn] py-trunk commit ac6e3880bb7b: fix standalone script generation on windows, make tests not do a chdir() so that distributed testing discovers the transferred lib Message-ID: <20100101205450.3377F7EF23@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262379267 -3600 # Node ID ac6e3880bb7be0068cd91862c39386d943acbff8 # Parent ff1a6628cd01adb21a1992e737860aae23b88ba1 fix standalone script generation on windows, make tests not do a chdir() so that distributed testing discovers the transferred lib --- a/testing/plugin/test_pytest_genscript.py +++ b/testing/plugin/test_pytest_genscript.py @@ -4,11 +4,15 @@ import subprocess def pytest_funcarg__standalone(request): return request.cached_setup(scope="module", setup=lambda: Standalone(request)) +pytestmark = py.test.mark.nochdir + class Standalone: def __init__(self, request): self.testdir = request.getfuncargvalue("testdir") self.script = self.testdir.tmpdir.join("mypytest") - self.testdir.runpytest("--genscript=%s" % self.script) + result = self.testdir.runpytest("--genscript=%s" % self.script) + assert result.ret == 0 + assert self.script.check() def run(self, anypython, testdir, *args): testdir.chdir() --- a/py/plugin/pytest_genscript.py +++ b/py/plugin/pytest_genscript.py @@ -40,7 +40,7 @@ def main(pybasedir, outfile, infile): name2src = {} for f in files: - k = f.replace("/", ".")[:-3] + k = f.replace(os.sep, ".")[:-3] name2src[k] = open(f, "rb").read() data = pickle.dumps(name2src, 2) --- a/py/plugin/pytest_pytester.py +++ b/py/plugin/pytest_pytester.py @@ -69,8 +69,8 @@ class TmpTestdir: self.tmpdir = tmpdir.mkdir(name) self.plugins = [] self._syspathremove = [] - self.chdir() # always chdir - assert hasattr(self, '_olddir') + if not hasattr(request.function, "nochdir"): + self.chdir() # always chdir self.request.addfinalizer(self.finalize) def __repr__(self): @@ -280,12 +280,7 @@ class TmpTestdir: return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) def run(self, *cmdargs): - old = self.tmpdir.chdir() - #print "chdir", self.tmpdir - 
try: - return self._run(*cmdargs) - finally: - old.chdir() + return self._run(*cmdargs) def _run(self, *cmdargs): cmdargs = [str(x) for x in cmdargs] From commits-noreply at bitbucket.org Fri Jan 1 23:10:35 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 1 Jan 2010 22:10:35 +0000 (UTC) Subject: [py-svn] py-trunk commit de6c5417d3b3: remove/refine some doc strings. create popen-files with absolute paths. Message-ID: <20100101221035.DBFB77EE81@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262383500 -3600 # Node ID de6c5417d3b3b5e36fd03c77181e073352b50641 # Parent ac6e3880bb7be0068cd91862c39386d943acbff8 remove/refine some doc strings. create popen-files with absolute paths. --- a/py/impl/test/pycollect.py +++ b/py/impl/test/pycollect.py @@ -1,20 +1,5 @@ """ -Python related collection nodes. Here is an example of -a tree of collectors and test items that this modules provides:: - - Module # File - Class - Instance - Function - Generator - ... - Function - Generator - Function - - DoctestFile # File - DoctestFileContent # acts as Item - +Python related collection nodes. """ import py import inspect --- a/py/impl/test/collect.py +++ b/py/impl/test/collect.py @@ -1,6 +1,5 @@ """ -base test collection objects. -Collectors and test Items form a tree +base test collection objects. Collectors and test Items form a tree that is usually built iteratively. """ import py @@ -24,17 +23,8 @@ class HookProxy: return call_matching_hooks class Node(object): - """ base class for Nodes in the collection tree. - Collector nodes have children and - Item nodes are terminal. - - All nodes of the collection tree carry a _config - attribute for these reasons: - - to access custom Collection Nodes from a project - (defined in conftest's) - - to pickle themselves relatively to the "topdir" - - configuration/options for setup/teardown - stdout/stderr capturing and execution of test items + """ base class for all Nodes in the collection tree. + Collector subclasses have children, Items are terminal nodes. """ def __init__(self, name, parent=None, config=None): self.name = name @@ -262,12 +252,10 @@ class Node(object): return col._getitembynames(names) _fromtrail = staticmethod(_fromtrail) - def _repr_failure_py(self, excinfo, outerr=None): - assert outerr is None, "XXX deprecated" + def _repr_failure_py(self, excinfo): excinfo.traceback = self._prunetraceback(excinfo.traceback) - # XXX temporary hack: getrepr() should not take a 'style' argument - # at all; it should record all data in all cases, and the style - # should be parametrized in toterminal(). + # XXX should excinfo.getrepr record all data and toterminal() + # process it? 
if self.config.option.tbstyle == "short": style = "short" else: --- a/py/plugin/pytest_pytester.py +++ b/py/plugin/pytest_pytester.py @@ -284,8 +284,8 @@ class TmpTestdir: def _run(self, *cmdargs): cmdargs = [str(x) for x in cmdargs] - p1 = py.path.local("stdout") - p2 = py.path.local("stderr") + p1 = self.tmpdir.join("stdout") + p2 = self.tmpdir.join("stderr") print_("running", cmdargs, "curdir=", py.path.local()) f1 = p1.open("w") f2 = p2.open("w") From commits-noreply at bitbucket.org Sat Jan 2 11:58:02 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 2 Jan 2010 10:58:02 +0000 (UTC) Subject: [py-svn] py-trunk commit 546b8924bbff: slightly refine invocation of py.test: use the py lib that we got invoked with, Message-ID: <20100102105802.B5CA47EF2B@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262429862 -3600 # Node ID 546b8924bbff4fbf57601b90da430e1f7238f3a2 # Parent de6c5417d3b3b5e36fd03c77181e073352b50641 slightly refine invocation of py.test: use the py lib that we got invoked with, does away with the need to not-chdir some tests --- a/testing/plugin/test_pytest_genscript.py +++ b/testing/plugin/test_pytest_genscript.py @@ -4,8 +4,6 @@ import subprocess def pytest_funcarg__standalone(request): return request.cached_setup(scope="module", setup=lambda: Standalone(request)) -pytestmark = py.test.mark.nochdir - class Standalone: def __init__(self, request): self.testdir = request.getfuncargvalue("testdir") --- a/py/plugin/pytest_pytester.py +++ b/py/plugin/pytest_pytester.py @@ -69,8 +69,7 @@ class TmpTestdir: self.tmpdir = tmpdir.mkdir(name) self.plugins = [] self._syspathremove = [] - if not hasattr(request.function, "nochdir"): - self.chdir() # always chdir + self.chdir() # always chdir self.request.addfinalizer(self.finalize) def __repr__(self): @@ -316,7 +315,9 @@ class TmpTestdir: else: cmdlinename = scriptname.replace(".", "") assert hasattr(py.cmdline, cmdlinename), cmdlinename - source = "import py ; py.cmdline.%s()" % cmdlinename + source = ("import sys ; sys.path.insert(0, %r); " + "import py ; py.cmdline.%s()" % + (str(py._dir.dirpath()), cmdlinename)) return (sys.executable, "-c", source,) def runpython(self, script): From commits-noreply at bitbucket.org Sat Jan 2 17:17:33 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 2 Jan 2010 16:17:33 +0000 (UTC) Subject: [py-svn] py-trunk commit fbcfccc6887e: streamlined plugin loading: order is now setuptools, ENV, commandline Message-ID: <20100102161733.BB68983868@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262449033 -3600 # Node ID fbcfccc6887ed019ba72c56e44bbde99117183a8 # Parent 546b8924bbff4fbf57601b90da430e1f7238f3a2 streamlined plugin loading: order is now setuptools, ENV, commandline and setuptools entry point names are turned to canonical namees ("pytest_*") --- a/testing/pytest/test_pluginmanager.py +++ b/testing/pytest/test_pluginmanager.py @@ -61,6 +61,8 @@ class TestBootstrapping: pluginmanager.consider_setuptools_entrypoints() plugin = pluginmanager.getplugin("mytestplugin") assert plugin.x == 42 + plugin2 = pluginmanager.getplugin("pytest_mytestplugin") + assert plugin2 == plugin def test_consider_setuptools_not_installed(self, monkeypatch): monkeypatch.setitem(py.std.sys.modules, 'pkg_resources', --- 
a/testing/pytest/test_config.py +++ b/testing/pytest/test_config.py @@ -237,3 +237,24 @@ def test_ensuretemp(recwarn): d2 = py.test.ensuretemp('hello') assert d1 == d2 assert d1.check(dir=1) + +def test_preparse_ordering(testdir, monkeypatch): + pkg_resources = py.test.importorskip("pkg_resources") + def my_iter(name): + assert name == "pytest11" + class EntryPoint: + name = "mytestplugin" + def load(self): + class PseudoPlugin: + x = 42 + return PseudoPlugin() + return iter([EntryPoint()]) + monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) + testdir.makeconftest(""" + pytest_plugins = "mytestplugin", + """) + monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin") + config = testdir.parseconfig() + plugin = config.pluginmanager.getplugin("mytestplugin") + assert plugin.x == 42 + --- a/py/impl/test/config.py +++ b/py/impl/test/config.py @@ -79,10 +79,10 @@ class Config(object): setattr(self.option, opt.dest, opt.default) def _preparse(self, args): + self.pluginmanager.consider_setuptools_entrypoints() + self.pluginmanager.consider_env() + self.pluginmanager.consider_preparse(args) self._conftest.setinitial(args) - self.pluginmanager.consider_setuptools_entrypoints() - self.pluginmanager.consider_preparse(args) - self.pluginmanager.consider_env() self.pluginmanager.do_addoption(self._parser) def parse(self, args): --- a/py/impl/test/pluginmanager.py +++ b/py/impl/test/pluginmanager.py @@ -85,10 +85,11 @@ class PluginManager(object): except ImportError: return # XXX issue a warning for ep in iter_entry_points('pytest11'): - if ep.name in self._name2plugin: + name = canonical_importname(ep.name) + if name in self._name2plugin: continue plugin = ep.load() - self.register(plugin, name=ep.name) + self.register(plugin, name=name) def consider_preparse(self, args): for opt1,opt2 in zip(args, args[1:]): --- a/CHANGELOG +++ b/CHANGELOG @@ -21,6 +21,10 @@ Changes between 1.X and 1.1.1 - new "pytestconfig" funcarg allows access to test config object +- streamlined plugin loading: order is now as documented in + customize.html: setuptools, ENV, commandline, conftest. 
+ also setuptools entry point names are turned to canonical namees ("pytest_*") + - automatically skip tests that need 'capfd' but have no os.dup - allow pytest_generate_tests to be defined in classes as well From commits-noreply at bitbucket.org Sat Jan 2 18:32:25 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 2 Jan 2010 17:32:25 +0000 (UTC) Subject: [py-svn] py-trunk commit 25857e40de18: higher timeout to accomodate slower execution environments Message-ID: <20100102173225.1AD0F7EF2B@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262453531 -3600 # Node ID 25857e40de181bee92ec5c82256ea00362d38b72 # Parent fbcfccc6887ed019ba72c56e44bbde99117183a8 higher timeout to accomodate slower execution environments --- a/testing/pytest/dist/test_nodemanage.py +++ b/testing/pytest/dist/test_nodemanage.py @@ -30,7 +30,7 @@ class TestNodeManager: "--tx", "3*popen")) nodemanager.setup_nodes([].append) - nodemanager.wait_nodesready(timeout=2.0) + nodemanager.wait_nodesready(timeout=10.0) def test_popen_rsync_subdir(self, testdir, mysetup): source, dest = mysetup.source, mysetup.dest From commits-noreply at bitbucket.org Sat Jan 2 23:33:28 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 2 Jan 2010 22:33:28 +0000 (UTC) Subject: [py-svn] py-trunk commit 610e6b54ebeb: enhance figleaf setup, enabled by default now (requires --figleaf). Generalize internal ability to show "hints" at the end of "-h". Message-ID: <20100102223328.D41AC83863@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262468933 -3600 # Node ID 610e6b54ebebdec7786e7baa51f9806391451d4c # Parent 25857e40de181bee92ec5c82256ea00362d38b72 enhance figleaf setup, enabled by default now (requires --figleaf). Generalize internal ability to show "hints" at the end of "-h". --- a/testing/pytest/test_parseopt.py +++ b/testing/pytest/test_parseopt.py @@ -8,12 +8,6 @@ class TestParser: out, err = capsys.readouterr() assert out.find("xyz") != -1 - def test_epilog(self): - parser = parseopt.Parser() - assert not parser.epilog - parser.epilog += "hello" - assert parser.epilog == "hello" - def test_group_add_and_get(self): parser = parseopt.Parser() group = parser.addgroup("hello", description="desc") @@ -117,9 +111,9 @@ class TestParser: def test_addoption_parser_epilog(testdir): testdir.makeconftest(""" def pytest_addoption(parser): - parser.epilog = "hello world" + parser.hints.append("hello world") """) result = testdir.runpytest('--help') #assert result.ret != 0 - assert result.stdout.fnmatch_lines(["*hello world*"]) + assert result.stdout.fnmatch_lines(["*hint: hello world*"]) --- a/CHANGELOG +++ b/CHANGELOG @@ -40,6 +40,10 @@ Changes between 1.X and 1.1.1 - change: the first pytest_collect_directory hook to return something will now prevent further hooks to be called. +- change: pytest figleaf now requires --figleaf to run and is turned + on by default (requires the 'figleaf' package though). Change + long command line options to be a bit shorter (see py.test -h). + - robustify capturing to survive if custom pytest_runtest_setup code failed and prevented the capturing setup code from running. 
--- a/py/plugin/pytest_figleaf.py +++ b/py/plugin/pytest_figleaf.py @@ -3,28 +3,30 @@ write and report coverage data with 'fig """ import py - -py.test.importorskip("figleaf.annotate_html") -import figleaf +py.test.importorskip("figleaf") +import figleaf.annotate_html def pytest_addoption(parser): group = parser.getgroup('figleaf options') - group.addoption('-F', action='store_true', default=False, + group.addoption('--figleaf', action='store_true', default=False, dest = 'figleaf', help=('trace python coverage with figleaf and write HTML ' 'for files below the current working dir')) - group.addoption('--figleaf-data', action='store', default='.figleaf', - dest='figleafdata', - help='path to coverage tracing file.') - group.addoption('--figleaf-html', action='store', default='html', - dest='figleafhtml', - help='path to the coverage html dir.') + group.addoption('--fig-data', action='store', default='.figleaf', + dest='figleafdata', metavar="dir", + help='set tracing file, default: ".figleaf".') + group.addoption('--fig-html', action='store', default='html', + dest='figleafhtml', metavar="dir", + help='set html reporting dir, default "html").') def pytest_configure(config): - figleaf.start() + if config.getvalue("figleaf"): + figleaf.start() def pytest_terminal_summary(terminalreporter): config = terminalreporter.config + if not config.getvalue("figleaf"): + return datafile = py.path.local(config.getvalue('figleafdata')) tw = terminalreporter._tw tw.sep('-', 'figleaf') --- a/testing/pytest/test_pluginmanager.py +++ b/testing/pytest/test_pluginmanager.py @@ -16,18 +16,17 @@ class TestBootstrapping: """) def test_plugin_skip(self, testdir, monkeypatch): - testdir.makepyfile(pytest_skipping1=""" + p = testdir.makepyfile(pytest_skipping1=""" import py py.test.skip("hello") """) - result = testdir.runpytest("-p", "skipping1") + p.copy(p.dirpath("pytest_skipping2.py")) + monkeypatch.setenv("PYTEST_PLUGINS", "skipping2") + result = testdir.runpytest("-p", "skipping1", "--traceconfig") + assert result.ret == 0 result.stdout.fnmatch_lines([ - "*WARNING*could not import plugin*skipping1*hello*" - ]) - monkeypatch.setenv("PYTEST_PLUGINS", "skipping1") - result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*WARNING*could not import plugin*skipping1*hello*" + "*hint*skipping2*hello*", + "*hint*skipping1*hello*", ]) def test_consider_env_plugin_instantiation(self, testdir, monkeypatch): --- a/testing/plugin/test_pytest_figleaf.py +++ b/testing/plugin/test_pytest_figleaf.py @@ -2,16 +2,16 @@ import py def test_functional(testdir): py.test.importorskip("figleaf") - testdir.plugins.append("figleaf") testdir.makepyfile(""" def f(): x = 42 def test_whatever(): pass """) - result = testdir.runpytest('-F') + result = testdir.runpytest('--figleaf') assert result.ret == 0 assert result.stdout.fnmatch_lines([ '*figleaf html*' ]) #print result.stdout.str() + --- a/py/plugin/pytest_default.py +++ b/py/plugin/pytest_default.py @@ -80,8 +80,9 @@ def pytest_addoption(parser): if execnet: add_dist_options(parser) else: - parser.epilog = ( - "'execnet>=1.0.0b4' package required for --looponfailing / distributed testing.") + parser.hints.append( + "'execnet>=1.0.0b4' required for --looponfailing / distributed testing." 
+ ) def add_dist_options(parser): # see http://pytest.org/help/dist") --- a/py/plugin/pytest_doctest.py +++ b/py/plugin/pytest_doctest.py @@ -18,7 +18,7 @@ from py.impl.code.code import TerminalRe import doctest def pytest_addoption(parser): - group = parser.getgroup("doctest options") + group = parser.getgroup("general") group.addoption("--doctest-modules", action="store_true", default=False, help="search all python files for doctests", --- a/py/impl/test/config.py +++ b/py/impl/test/config.py @@ -92,6 +92,7 @@ class Config(object): assert not hasattr(self, 'args'), ( "can only parse cmdline args at most once per Config object") self._preparse(args) + self._parser.hints.extend(self.pluginmanager._hints) args = self._parser.parse_setoption(args, self.option) if not args: args.append(py.std.os.getcwd()) --- a/py/impl/test/pluginmanager.py +++ b/py/impl/test/pluginmanager.py @@ -9,7 +9,7 @@ from py.impl.test.outcome import Skipped default_plugins = ( "default runner capture terminal mark skipping tmpdir monkeypatch " "recwarn pdb pastebin unittest helpconfig nose assertion genscript " - "logxml").split() + "logxml figleaf").split() def check_old_use(mod, modname): clsname = modname[len('pytest_'):].capitalize() + "Plugin" @@ -19,10 +19,11 @@ class PluginManager(object): def __init__(self): self.registry = Registry() self._name2plugin = {} + self._hints = [] self.hook = HookRelay(hookspecs=hookspec, registry=self.registry) self.register(self) for spec in default_plugins: - self.import_plugin(spec) + self.import_plugin(spec) def _getpluginname(self, plugin, name): if name is None: @@ -123,15 +124,17 @@ class PluginManager(object): raise except Skipped: e = py.std.sys.exc_info()[1] - self._warn("could not import plugin %r, reason: %r" %( - (modname, e.msg))) + self._hints.append("skipped plugin %r: %s" %((modname, e.msg))) else: check_old_use(mod, modname) self.register(mod) self.consider_module(mod) - def _warn(self, msg): - print ("===WARNING=== %s" % (msg,)) + def pytest_terminal_summary(self, terminalreporter): + tw = terminalreporter._tw + if terminalreporter.config.option.traceconfig: + for hint in self._hints: + tw.line("hint: %s" % hint) # # @@ -201,10 +204,8 @@ def importplugin(importspec): e = py.std.sys.exc_info()[1] if str(e).find(importspec) == -1: raise - #print "syspath:", py.std.sys.path - #print "curdir:", py.std.os.getcwd() - return __import__(importspec) # show the original exception - + # show the original exception, not the failing internal one + return __import__(importspec) class MultiCall: --- a/py/impl/test/parseopt.py +++ b/py/impl/test/parseopt.py @@ -24,7 +24,7 @@ class Parser: self._groups = [] self._processopt = processopt self._usage = usage - self.epilog = "" + self.hints = [] def processoption(self, option): if self._processopt: @@ -56,9 +56,7 @@ class Parser: self._anonymous.addoption(*opts, **attrs) def parse(self, args): - optparser = optparse.OptionParser(usage=self._usage) - # make sure anaonymous group is at the end - optparser.epilog = self.epilog + optparser = MyOptionParser(self) groups = self._groups + [self._anonymous] for group in groups: if group.options: @@ -100,4 +98,15 @@ class OptionGroup: self.parser.processoption(option) self.options.append(option) - + +class MyOptionParser(optparse.OptionParser): + def __init__(self, parser): + self._parser = parser + optparse.OptionParser.__init__(self, usage=parser._usage) + def format_epilog(self, formatter): + hints = self._parser.hints + if hints: + s = "\n".join(["hint: " + x for x in hints]) + "\n" + s 
= "\n" + s + "\n" + return s + return "" From commits-noreply at bitbucket.org Sat Jan 2 23:33:30 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 2 Jan 2010 22:33:30 +0000 (UTC) Subject: [py-svn] py-trunk commit 2401446cdb0b: enable doctest plugin by default, add a --doctest-glob option and some documentation, regen plugin docs. Message-ID: <20100102223330.74ADD83868@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262471446 -3600 # Node ID 2401446cdb0b4eada921ff49af55ef76759fb154 # Parent 610e6b54ebebdec7786e7baa51f9806391451d4c enable doctest plugin by default, add a --doctest-glob option and some documentation, regen plugin docs. --- a/testing/plugin/test_pytest_doctest.py +++ b/testing/plugin/test_pytest_doctest.py @@ -14,14 +14,14 @@ class TestDoctests: """) for x in (testdir.tmpdir, checkfile): #print "checking that %s returns custom items" % (x,) - items, reprec = testdir.inline_genitems(x, '-p', 'doctest') + items, reprec = testdir.inline_genitems(x) assert len(items) == 1 assert isinstance(items[0], DoctestTextfile) def test_collect_module(self, testdir): path = testdir.makepyfile(whatever="#") for p in (path, testdir.tmpdir): - items, reprec = testdir.inline_genitems(p, '-p', 'doctest', + items, reprec = testdir.inline_genitems(p, '--doctest-modules') assert len(items) == 1 assert isinstance(items[0], DoctestModule) @@ -32,7 +32,16 @@ class TestDoctests: >>> x == 1 False """) - reprec = testdir.inline_run(p, '-p', 'doctest') + reprec = testdir.inline_run(p, ) + reprec.assertoutcome(failed=1) + + def test_new_pattern(self, testdir): + p = testdir.maketxtfile(xdoc =""" + >>> x = 1 + >>> x == 1 + False + """) + reprec = testdir.inline_run(p, "--doctest-glob=x*.txt") reprec.assertoutcome(failed=1) def test_doctest_unexpected_exception(self, testdir): @@ -44,7 +53,7 @@ class TestDoctests: >>> x 2 """) - reprec = testdir.inline_run(p, '-p', 'doctest') + reprec = testdir.inline_run(p) call = reprec.getcall("pytest_runtest_logreport") assert call.report.failed assert call.report.longrepr @@ -63,7 +72,7 @@ class TestDoctests: ''' """) - reprec = testdir.inline_run(p, '-p', 'doctest', "--doctest-modules") + reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(failed=1) def test_doctestmodule_external(self, testdir): @@ -76,7 +85,7 @@ class TestDoctests: 2 ''' """) - result = testdir.runpytest(p, '-p', 'doctest', "--doctest-modules") + result = testdir.runpytest(p, "--doctest-modules") result.stdout.fnmatch_lines([ '004 *>>> i = 0', '005 *>>> i + 1', @@ -94,7 +103,7 @@ class TestDoctests: >>> i + 1 2 """) - result = testdir.runpytest(p, '-p', 'doctest') + result = testdir.runpytest(p) result.stdout.fnmatch_lines([ '001 >>> i = 0', '002 >>> i + 1', --- a/doc/test/plugin/figleaf.txt +++ b/doc/test/plugin/figleaf.txt @@ -13,12 +13,12 @@ command line options -------------------- -``-F`` +``--figleaf`` trace python coverage with figleaf and write HTML for files below the current working dir -``--figleaf-data=FIGLEAFDATA`` - path to coverage tracing file. -``--figleaf-html=FIGLEAFHTML`` - path to the coverage html dir. +``--fig-data=dir`` + set tracing file, default: ".figleaf". +``--fig-html=dir`` + set html reporting dir, default "html"). 
Start improving this plugin in 30 seconds ========================================= --- a/py/plugin/pytest_figleaf.py +++ b/py/plugin/pytest_figleaf.py @@ -17,7 +17,7 @@ def pytest_addoption(parser): help='set tracing file, default: ".figleaf".') group.addoption('--fig-html', action='store', default='html', dest='figleafhtml', metavar="dir", - help='set html reporting dir, default "html").') + help='set html reporting dir, default "html".') def pytest_configure(config): if config.getvalue("figleaf"): --- a/doc/test/plugin/doctest.txt +++ b/doc/test/plugin/doctest.txt @@ -10,20 +10,31 @@ collect and execute doctests from module Usage ------------- -By default all files matching the ``test_*.txt`` pattern will -be run with the ``doctest`` module. If you issue:: +By default all files matching the ``test*.txt`` pattern will +be run through the python standard ``doctest`` module. Issue:: + + py.test --doctest-glob='*.rst' + +to change the pattern. Additionally you can trigger running of +tests in all python modules (including regular python test modules):: py.test --doctest-modules -all python files in your projects will be doctest-run -as well. +You can also make these changes permanent in your project by +putting them into a conftest.py file like this:: + + # content of conftest.py + option_doctestmodules = True + option_doctestglob = "*.rst" command line options -------------------- ``--doctest-modules`` - search all python files for doctests + run doctests in all .py modules +``--doctest-glob=pat`` + doctests file matching pattern, default: test*.txt Start improving this plugin in 30 seconds ========================================= --- a/CHANGELOG +++ b/CHANGELOG @@ -44,6 +44,9 @@ Changes between 1.X and 1.1.1 on by default (requires the 'figleaf' package though). Change long command line options to be a bit shorter (see py.test -h). +- change: pytest doctest plugin is now enabled by default and has a + new option --doctest-glob to set a pattern for file matches. + - robustify capturing to survive if custom pytest_runtest_setup code failed and prevented the capturing setup code from running. --- a/doc/test/plugin/hookspec.txt +++ b/doc/test/plugin/hookspec.txt @@ -32,6 +32,7 @@ hook specification sourcecode def pytest_collect_directory(path, parent): """ return Collection node or None for the given path. """ + pytest_collect_directory.firstresult = True def pytest_collect_file(path, parent): """ return Collection node or None for the given path. """ --- a/py/plugin/pytest_doctest.py +++ b/py/plugin/pytest_doctest.py @@ -4,13 +4,22 @@ collect and execute doctests from module Usage ------------- -By default all files matching the ``test_*.txt`` pattern will -be run with the ``doctest`` module. If you issue:: +By default all files matching the ``test*.txt`` pattern will +be run through the python standard ``doctest`` module. Issue:: + + py.test --doctest-glob='*.rst' + +to change the pattern. Additionally you can trigger running of +tests in all python modules (including regular python test modules):: py.test --doctest-modules -all python files in your projects will be doctest-run -as well. 
+You can also make these changes permanent in your project by +putting them into a conftest.py file like this:: + + # content of conftest.py + option_doctestmodules = True + option_doctestglob = "*.rst" """ import py @@ -20,15 +29,20 @@ import doctest def pytest_addoption(parser): group = parser.getgroup("general") group.addoption("--doctest-modules", - action="store_true", default=False, - help="search all python files for doctests", + action="store_true", default=False, + help="run doctests in all .py modules", dest="doctestmodules") - + group.addoption("--doctest-glob", + action="store", default="test*.txt", metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob") + def pytest_collect_file(path, parent): + config = parent.config if path.ext == ".py": - if parent.config.getvalue("doctestmodules"): + if config.getvalue("doctestmodules"): return DoctestModule(path, parent) - if path.check(fnmatch="test_*.txt"): + elif path.check(fnmatch=config.getvalue("doctestglob")): return DoctestTextfile(path, parent) class ReprFailDoctest(TerminalRepr): --- a/py/impl/test/pluginmanager.py +++ b/py/impl/test/pluginmanager.py @@ -9,7 +9,7 @@ from py.impl.test.outcome import Skipped default_plugins = ( "default runner capture terminal mark skipping tmpdir monkeypatch " "recwarn pdb pastebin unittest helpconfig nose assertion genscript " - "logxml figleaf").split() + "logxml figleaf doctest").split() def check_old_use(mod, modname): clsname = modname[len('pytest_'):].capitalize() + "Plugin" From commits-noreply at bitbucket.org Sun Jan 3 01:03:26 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 3 Jan 2010 00:03:26 +0000 (UTC) Subject: [py-svn] py-trunk commit 5450c8db1a17: vastly simplify and cleanup collection initialization by internally Message-ID: <20100103000326.C411C7EF1E@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262476964 -3600 # Node ID 5450c8db1a172c5ae9709ba684257d71af8d1625 # Parent 2401446cdb0b4eada921ff49af55ef76759fb154 vastly simplify and cleanup collection initialization by internally introducing a RootCollector. 
Note that the internal node methods _fromtrail and _totrail are shifted to the still internal config._rootcol.fromtrail/totrail --- a/testing/pytest/test_config.py +++ b/testing/pytest/test_config.py @@ -1,4 +1,5 @@ import py +from py.impl.test.collect import RootCollector class TestConfigCmdlineParsing: @@ -153,7 +154,7 @@ class TestConfigApi_getcolitems: assert isinstance(col, py.test.collect.Module) assert col.name == 'x.py' assert col.parent.name == tmpdir.basename - assert col.parent.parent is None + assert isinstance(col.parent.parent, RootCollector) for col in col.listchain(): assert col.config is config @@ -164,7 +165,7 @@ class TestConfigApi_getcolitems: assert isinstance(col, py.test.collect.Directory) print(col.listchain()) assert col.name == 'a' - assert col.parent is None + assert isinstance(col.parent, RootCollector) assert col.config is config def test__getcol_pkgfile(self, testdir, tmpdir): @@ -175,7 +176,7 @@ class TestConfigApi_getcolitems: assert isinstance(col, py.test.collect.Module) assert col.name == 'x.py' assert col.parent.name == x.dirpath().basename - assert col.parent.parent is None + assert isinstance(col.parent.parent.parent, RootCollector) for col in col.listchain(): assert col.config is config --- a/CHANGELOG +++ b/CHANGELOG @@ -55,6 +55,7 @@ Changes between 1.X and 1.1.1 which will regularly see e.g. py.test.mark and py.test.importorskip. - simplify internal plugin manager machinery +- simplify internal collection tree by introducing a RootCollector node - fix assert reinterpreation that sees a call containing "keyword=..." --- a/py/impl/test/looponfail/remote.py +++ b/py/impl/test/looponfail/remote.py @@ -136,8 +136,8 @@ def slave_runsession(channel, config, fu colitems = [] for trail in trails: try: - colitem = py.test.collect.Collector._fromtrail(trail, config) - except AssertionError: + colitem = config._rootcol.fromtrail(trail) + except ValueError: #XXX send info for "test disappeared" or so continue colitems.append(colitem) @@ -159,4 +159,5 @@ def slave_runsession(channel, config, fu session.config.hook.pytest_looponfailinfo( failreports=list(failreports), rootdirs=[config.topdir]) - channel.send([rep.getnode()._totrail() for rep in failreports]) + rootcol = session.config._rootcol + channel.send([rootcol.totrail(rep.getnode()) for rep in failreports]) --- a/testing/pytest/test_collect.py +++ b/testing/pytest/test_collect.py @@ -52,44 +52,8 @@ class TestCollector: parent = fn.getparent(py.test.collect.Class) assert parent is cls - def test_totrail_and_back(self, testdir, tmpdir): - a = tmpdir.ensure("a", dir=1) - tmpdir.ensure("a", "__init__.py") - x = tmpdir.ensure("a", "trail.py") - config = testdir.reparseconfig([x]) - col = config.getfsnode(x) - trail = col._totrail() - assert len(trail) == 2 - assert trail[0] == a.relto(config.topdir) - assert trail[1] == ('trail.py',) - col2 = py.test.collect.Collector._fromtrail(trail, config) - assert col2.listnames() == col.listnames() - - def test_totrail_topdir_and_beyond(self, testdir, tmpdir): - config = testdir.reparseconfig() - col = config.getfsnode(config.topdir) - trail = col._totrail() - assert len(trail) == 2 - assert trail[0] == '.' 
- assert trail[1] == () - col2 = py.test.collect.Collector._fromtrail(trail, config) - assert col2.fspath == config.topdir - assert len(col2.listchain()) == 1 - col3 = config.getfsnode(config.topdir.dirpath()) - py.test.raises(ValueError, - "col3._totrail()") - - def test_listnames_and__getitembynames(self, testdir): - modcol = testdir.getmodulecol("pass", withinit=True) - print(modcol.config.pluginmanager.getplugins()) - names = modcol.listnames() - print(names) - dircol = modcol.config.getfsnode(modcol.config.topdir) - x = dircol._getitembynames(names) - assert modcol.name == x.name - - def test_listnames_getitembynames_custom(self, testdir): + def test_getcustomfile_roundtrip(self, testdir): hello = testdir.makefile(".xxx", hello="world") testdir.makepyfile(conftest=""" import py @@ -98,15 +62,15 @@ class TestCollector: class MyDirectory(py.test.collect.Directory): def collect(self): return [CustomFile(self.fspath.join("hello.xxx"), parent=self)] - Directory = MyDirectory + def pytest_collect_directory(path, parent): + return MyDirectory(path, parent=parent) """) config = testdir.parseconfig(hello) node = config.getfsnode(hello) assert isinstance(node, py.test.collect.File) assert node.name == "hello.xxx" - names = node.listnames()[1:] - dircol = config.getfsnode(config.topdir) - node = dircol._getitembynames(names) + names = config._rootcol.totrail(node) + node = config._rootcol.getbynames(names) assert isinstance(node, py.test.collect.File) class TestCollectFS: @@ -232,3 +196,27 @@ class TestCustomConftests: "*MyModule*", "*test_x*" ]) + +class TestRootCol: + def test_totrail_and_back(self, testdir, tmpdir): + a = tmpdir.ensure("a", dir=1) + tmpdir.ensure("a", "__init__.py") + x = tmpdir.ensure("a", "trail.py") + config = testdir.reparseconfig([x]) + col = config.getfsnode(x) + trail = config._rootcol.totrail(col) + col2 = config._rootcol.fromtrail(trail) + assert col2 == col + + def test_totrail_topdir_and_beyond(self, testdir, tmpdir): + config = testdir.reparseconfig() + col = config.getfsnode(config.topdir) + trail = config._rootcol.totrail(col) + col2 = config._rootcol.fromtrail(trail) + assert col2.fspath == config.topdir + assert len(col2.listchain()) == 1 + py.test.raises(config.Error, "config.getfsnode(config.topdir.dirpath())") + #col3 = config.getfsnode(config.topdir.dirpath()) + #py.test.raises(ValueError, + # "col3._totrail()") + --- a/py/impl/test/collect.py +++ b/py/impl/test/collect.py @@ -115,7 +115,7 @@ class Node(object): l = [self] while 1: x = l[-1] - if x.parent is not None: + if x.parent is not None and x.parent.parent is not None: l.append(x.parent) else: if not rootfirst: @@ -130,62 +130,7 @@ class Node(object): while current and not isinstance(current, cls): current = current.parent return current - - def _getitembynames(self, namelist): - cur = self - for name in namelist: - if name: - next = cur.collect_by_name(name) - if next is None: - existingnames = [x.name for x in self._memocollect()] - msg = ("Collector %r does not have name %r " - "existing names are: %s" % - (cur, name, existingnames)) - raise AssertionError(msg) - cur = next - return cur - - def _getfsnode(self, path): - # this method is usually called from - # config.getfsnode() which returns a colitem - # from filename arguments - # - # pytest's collector tree does not neccessarily - # follow the filesystem and we thus need to do - # some special matching code here because - # _getitembynames() works by colitem names, not - # basenames. 
- if path == self.fspath: - return self - basenames = path.relto(self.fspath).split(path.sep) - cur = self - while basenames: - basename = basenames.pop(0) - assert basename - fspath = cur.fspath.join(basename) - colitems = cur._memocollect() - l = [] - for colitem in colitems: - if colitem.fspath == fspath or colitem.name == basename: - l.append(colitem) - if not l: - raise self.config.Error("can't collect: %s" %(fspath,)) - if basenames: - if len(l) > 1: - msg = ("Collector %r has more than one %r colitem " - "existing colitems are: %s" % - (cur, fspath, colitems)) - raise self.config.Error("xxx-too many test types for: %s" % (fspath, )) - cur = l[0] - else: - if len(l) > 1: - cur = l - else: - cur = l[0] - break - return cur - def readkeywords(self): return dict([(x, True) for x in self._keywords()]) @@ -228,30 +173,6 @@ class Node(object): def _prunetraceback(self, traceback): return traceback - def _totrail(self): - """ provide a trail relative to the topdir, - which can be used to reconstruct the - collector (possibly on a different host - starting from a different topdir). - """ - chain = self.listchain() - topdir = self.config.topdir - relpath = chain[0].fspath.relto(topdir) - if not relpath: - if chain[0].fspath == topdir: - relpath = "." - else: - raise ValueError("%r not relative to topdir %s" - %(chain[0].fspath, topdir)) - return relpath, tuple([x.name for x in chain[1:]]) - - def _fromtrail(trail, config): - relpath, names = trail - fspath = config.topdir.join(relpath) - col = config.getfsnode(fspath) - return col._getitembynames(names) - _fromtrail = staticmethod(_fromtrail) - def _repr_failure_py(self, excinfo): excinfo.traceback = self._prunetraceback(excinfo.traceback) # XXX should excinfo.getrepr record all data and toterminal() @@ -347,30 +268,15 @@ class FSCollector(Collector): self.fspath = fspath def __getstate__(self): - if self.parent is None: - # the root node needs to pickle more context info - topdir = self.config.topdir - relpath = self.fspath.relto(topdir) - if not relpath: - if self.fspath == topdir: - relpath = "." - else: - raise ValueError("%r not relative to topdir %s" - %(self.fspath, topdir)) - return (self.name, self.config, relpath) + if isinstance(self.parent, RootCollector): + relpath = self.parent._getrelpath(self.fspath) + return (relpath, self.parent) else: return (self.name, self.parent) def __setstate__(self, picklestate): - if len(picklestate) == 3: - # root node - name, config, relpath = picklestate - fspath = config.topdir.join(relpath) - fsnode = config.getfsnode(fspath) - self.__dict__.update(fsnode.__dict__) - else: - name, parent = picklestate - self.__init__(parent.fspath.join(name), parent=parent) + name, parent = picklestate + self.__init__(parent.fspath.join(name), parent=parent) class File(FSCollector): """ base class for collecting tests from a file. 
""" @@ -421,7 +327,8 @@ class Directory(FSCollector): l = [] for x in res: if x not in l: - assert x.parent == self, "wrong collection tree construction" + assert x.parent == self, (x.parent, self) + assert x.fspath == path, (x.fspath, path) l.append(x) res = l return res @@ -468,3 +375,67 @@ def warnoldtestrun(function=None): "implement item.runtest() instead of " "item.run() and item.execute()", stacklevel=2, function=function) + + + +class RootCollector(Directory): + def __init__(self, config): + Directory.__init__(self, config.topdir, parent=None, config=config) + self.name = None + + def getfsnode(self, path): + path = py.path.local(path) + if not path.check(): + raise self.config.Error("file not found: %s" %(path,)) + topdir = self.config.topdir + if path != topdir and not path.relto(topdir): + raise self.config.Error("path %r is not relative to %r" % + (str(path), str(self.fspath))) + # assumtion: pytest's fs-collector tree follows the filesystem tree + basenames = filter(None, path.relto(topdir).split(path.sep)) + try: + return self.getbynames(basenames) + except ValueError: + raise self.config.Error("can't collect: %s" % str(path)) + + def getbynames(self, names): + current = self.consider(self.config.topdir) + for name in names: + if name == ".": # special "identity" name + continue + l = [] + for x in current._memocollect(): + if x.name == name: + l.append(x) + elif x.fspath == current.fspath.join(name): + l.append(x) + if not l: + raise ValueError("no node named %r in %r" %(name, current)) + current = l[0] + return current + + def totrail(self, node): + chain = node.listchain() + names = [self._getrelpath(chain[0].fspath)] + names += [x.name for x in chain[1:]] + return names + + def fromtrail(self, trail): + return self.config._rootcol.getbynames(trail) + + def _getrelpath(self, fspath): + topdir = self.config.topdir + relpath = fspath.relto(topdir) + if not relpath: + if fspath == topdir: + relpath = "." 
+ else: + raise ValueError("%r not relative to topdir %s" + %(self.fspath, topdir)) + return relpath + + def __getstate__(self): + return self.config + + def __setstate__(self, config): + self.__init__(config) --- a/testing/pytest/test_pycollect.py +++ b/testing/pytest/test_pycollect.py @@ -434,3 +434,9 @@ def test_generate_tests_only_done_in_sub result.stdout.fnmatch_lines([ "*3 passed*" ]) + +def test_modulecol_roundtrip(testdir): + modcol = testdir.getmodulecol("pass", withinit=True) + trail = modcol.config._rootcol.totrail(modcol) + newcol = modcol.config._rootcol.fromtrail(trail) + assert modcol.name == newcol.name --- a/testing/pytest/test_deprecated_api.py +++ b/testing/pytest/test_deprecated_api.py @@ -5,7 +5,7 @@ from py.impl.test.outcome import Skipped class TestCollectDeprecated: def test_collect_with_deprecated_run_and_join(self, testdir, recwarn): - testdir.makepyfile(conftest=""" + testdir.makeconftest(""" import py class MyInstance(py.test.collect.Instance): @@ -39,7 +39,8 @@ class TestCollectDeprecated: return self.Module(self.fspath.join(name), parent=self) def pytest_collect_directory(path, parent): - return MyDirectory(path, parent) + if path.basename == "subconf": + return MyDirectory(path, parent) """) subconf = testdir.mkpydir("subconf") somefile = subconf.join("somefile.py") --- a/py/impl/test/config.py +++ b/py/impl/test/config.py @@ -2,6 +2,7 @@ import py, os from py.impl.test.conftesthandle import Conftest from py.impl.test.pluginmanager import PluginManager from py.impl.test import parseopt +from py.impl.test.collect import RootCollector def ensuretemp(string, dir=1): """ (deprecated) return temporary directory path with @@ -97,6 +98,7 @@ class Config(object): if not args: args.append(py.std.os.getcwd()) self.topdir = gettopdir(args) + self._rootcol = RootCollector(config=self) self.args = [py.path.local(x) for x in args] # config objects are usually pickled across system @@ -117,6 +119,7 @@ class Config(object): py.test.config = self # next line will registers default plugins self.__init__(topdir=py.path.local()) + self._rootcol = RootCollector(config=self) args, cmdlineopts = repr args = [self.topdir.join(x) for x in args] self.option = cmdlineopts @@ -150,17 +153,7 @@ class Config(object): return [self.getfsnode(arg) for arg in self.args] def getfsnode(self, path): - path = py.path.local(path) - if not path.check(): - raise self.Error("file not found: %s" %(path,)) - # we want our possibly custom collection tree to start at pkgroot - pkgpath = path.pypkgpath() - if pkgpath is None: - pkgpath = path.check(file=1) and path.dirpath() or path - tmpcol = py.test.collect.Directory(pkgpath, config=self) - col = tmpcol.ihook.pytest_collect_directory(path=pkgpath, parent=tmpcol) - col.parent = None - return col._getfsnode(path) + return self._rootcol.getfsnode(path) def _getcollectclass(self, name, path): try: --- a/testing/plugin/test_pytest_resultlog.py +++ b/testing/plugin/test_pytest_resultlog.py @@ -4,9 +4,9 @@ from py.plugin.pytest_resultlog import g from py.impl.test.collect import Node, Item, FSCollector def test_generic_path(testdir): - config = testdir.Config() - p1 = Node('a', config=config) - assert p1.fspath is None + config = testdir.parseconfig() + p1 = Node('a', parent=config._rootcol) + #assert p1.fspath is None p2 = Node('B', parent=p1) p3 = Node('()', parent = p2) item = Item('c', parent = p3) @@ -14,7 +14,7 @@ def test_generic_path(testdir): res = generic_path(item) assert res == 'a.B().c' - p0 = FSCollector('proj/test', config=config) + p0 = 
FSCollector('proj/test', parent=config._rootcol) p1 = FSCollector('proj/test/a', parent=p0) p2 = Node('B', parent=p1) p3 = Node('()', parent = p2) From commits-noreply at bitbucket.org Sun Jan 3 11:24:14 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 3 Jan 2010 10:24:14 +0000 (UTC) Subject: [py-svn] py-trunk commit d0ab94d109e7: disable default inclusion of figleaf plugin because it caused test Message-ID: <20100103102414.6E6287EF35@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262514152 -3600 # Node ID d0ab94d109e711cfe416932124aa50666032028a # Parent 5450c8db1a172c5ae9709ba684257d71af8d1625 disable default inclusion of figleaf plugin because it caused test failures wrt to capturing/logging interaction. pytest_figleaf should anyway better become its own externally living plugin. --- a/ISSUES.txt +++ b/ISSUES.txt @@ -99,3 +99,11 @@ The remaining uses of py.test.ensuretemp itself are for setup methods. Also users have expressed the wish to have funcargs available to setup functions. Experiment with allowing funcargs there and finalizing deprecating py.test.ensuretemp. + +outsource figleaf plugin +--------------------------------------- +tags: 1.2 + +Packages with external dependencies should be moved out +of the core distribution. Also figleaf could serve as +another prototype for an external plugin. --- a/py/impl/test/pluginmanager.py +++ b/py/impl/test/pluginmanager.py @@ -9,7 +9,7 @@ from py.impl.test.outcome import Skipped default_plugins = ( "default runner capture terminal mark skipping tmpdir monkeypatch " "recwarn pdb pastebin unittest helpconfig nose assertion genscript " - "logxml figleaf doctest").split() + "logxml doctest").split() def check_old_use(mod, modname): clsname = modname[len('pytest_'):].capitalize() + "Plugin" --- a/CHANGELOG +++ b/CHANGELOG @@ -40,9 +40,8 @@ Changes between 1.X and 1.1.1 - change: the first pytest_collect_directory hook to return something will now prevent further hooks to be called. -- change: pytest figleaf now requires --figleaf to run and is turned - on by default (requires the 'figleaf' package though). Change - long command line options to be a bit shorter (see py.test -h). +- change: figleaf plugin now requires --figleaf to run. Also + change its long command line options to be a bit shorter (see py.test -h). - change: pytest doctest plugin is now enabled by default and has a new option --doctest-glob to set a pattern for file matches. From commits-noreply at bitbucket.org Sun Jan 3 11:42:35 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 3 Jan 2010 10:42:35 +0000 (UTC) Subject: [py-svn] py-trunk commit 2509c8220062: bumping version number: 1.2.0a1 Message-ID: <20100103104235.AB1027EF35@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262515346 -3600 # Node ID 2509c822006230713773c0f981bd5b6b4170432c # Parent d0ab94d109e711cfe416932124aa50666032028a bumping version number: 1.2.0a1 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ For questions please check out http://py .. _`py.path`: http://pylib.org/path.html .. 
_`py.code`: http://pylib.org/code.html -(c) Holger Krekel and others, 2009 +(c) Holger Krekel and others, 2004-2010 """ trunk = None def main(): @@ -28,7 +28,7 @@ def main(): name='py', description='py.test and pylib: rapid testing and development utils.', long_description = long_description, - version= trunk or '1.1.1post1', + version= trunk or '1.2.0a1', url='http://pylib.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], --- a/py/__init__.py +++ b/py/__init__.py @@ -7,9 +7,9 @@ and classes. The initpkg-dictionary be name->value mappings where value can be another namespace dictionary or an import path. -(c) Holger Krekel and others, 2009 +(c) Holger Krekel and others, 2004-2010 """ -version = "1.1.1post1" +version = "1.2.0a1" __version__ = version = version or "1.1.x" import py.apipkg From commits-noreply at bitbucket.org Sun Jan 3 12:41:59 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 3 Jan 2010 11:41:59 +0000 (UTC) Subject: [py-svn] py-trunk commit 472d0a7c4136: re-arrange "py.test -h" command line option grouping and update some plugin docs. Message-ID: <20100103114159.EB2317EF2C@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262518889 -3600 # Node ID 472d0a7c413644069c025de0923d9719027c4326 # Parent 2509c822006230713773c0f981bd5b6b4170432c re-arrange "py.test -h" command line option grouping and update some plugin docs. --- a/doc/test/plugin/resultlog.txt +++ b/doc/test/plugin/resultlog.txt @@ -2,12 +2,15 @@ pytest_resultlog plugin ======================= -resultlog plugin for machine-readable logging of test results. +non-xml machine-readable logging of test results. .. contents:: :local: -Useful for buildbot integration code. +Useful for buildbot integration code. See the `PyPy-test`_ + web page for post-processing. + +.. _`PyPy-test`: http://codespeak.net:8099/summary command line options -------------------- --- a/doc/test/plugin/helpconfig.txt +++ b/doc/test/plugin/helpconfig.txt @@ -13,10 +13,12 @@ command line options -------------------- +``--version`` + display py lib version and import information. +``-p name`` + early-load given plugin (multi-allowed). ``--help-config`` show available conftest.py and ENV-variable names. -``--version`` - display py lib version and import information. Start improving this plugin in 30 seconds ========================================= --- a/doc/test/plugin/terminal.txt +++ b/doc/test/plugin/terminal.txt @@ -18,13 +18,11 @@ command line options ``-l, --showlocals`` show locals in tracebacks (disabled by default). ``--report=opts`` - comma separated options, valid: skipped,xfailed + show more info, valid: skipped,xfailed ``--tb=style`` traceback verboseness (long/short/no). ``--fulltrace`` don't cut any tracebacks (default is to cut). -``--collectonly`` - only collect tests, don't execute them. ``--traceconfig`` trace considerations of conftest.py files. ``--nomagic`` --- a/doc/test/plugin/capture.txt +++ b/doc/test/plugin/capture.txt @@ -116,7 +116,7 @@ command line options ``--capture=method`` - set capturing method during tests: fd (default)|sys|no. + per-test capturing method: one of fd (default)|sys|no. ``-s`` shortcut for --capture=no. --- a/doc/test/plugin/pdb.txt +++ b/doc/test/plugin/pdb.txt @@ -14,7 +14,7 @@ command line options ``--pdb`` - start pdb (the Python debugger) on errors. + start the interactive Python debugger on errors. 
Start improving this plugin in 30 seconds ========================================= --- a/py/plugin/pytest_default.py +++ b/py/plugin/pytest_default.py @@ -55,28 +55,31 @@ def pytest_addoption(parser): group._addoption('-x', '--exitfirst', action="store_true", dest="exitfirst", default=False, help="exit instantly on first error or failed test."), - group.addoption("--ignore", action="append", metavar="path", - help="ignore path during collection (multi-allowed).") group._addoption('-k', action="store", dest="keyword", default='', help="only run test items matching the given " "space separated keywords. precede a keyword with '-' to negate. " "Terminate the expression with ':' to treat a match as a signal " "to run all subsequent tests. ") - group._addoption('-p', action="append", dest="plugins", default = [], - help=("load the specified plugin after command line parsing. ")) if execnet: group._addoption('-f', '--looponfail', action="store_true", dest="looponfail", default=False, help="run tests, re-run failing test set until all pass.") + group = parser.getgroup("collect", "collection") + group.addoption('--collectonly', + action="store_true", dest="collectonly", + help="only collect tests, don't execute them."), + group.addoption("--ignore", action="append", metavar="path", + help="ignore path during collection (multi-allowed).") + group.addoption('--confcutdir', dest="confcutdir", default=None, + metavar="dir", + help="only load conftest.py's relative to specified dir.") + group = parser.getgroup("debugconfig", "test process debugging and configuration") group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", help="base temporary directory for this test run.") - group.addoption('--confcutdir', dest="confcutdir", default=None, - metavar="dir", - help="only load conftest.py's relative to specified dir.") if execnet: add_dist_options(parser) else: --- a/py/plugin/pytest_pdb.py +++ b/py/plugin/pytest_pdb.py @@ -13,7 +13,7 @@ def pytest_addoption(parser): group = parser.getgroup("general") group._addoption('--pdb', action="store_true", dest="usepdb", default=False, - help="start pdb (the Python debugger) on errors.") + help="start the interactive Python debugger on errors.") def pytest_configure(__multicall__, config): --- a/py/impl/code/_assertionnew.py +++ b/py/impl/code/_assertionnew.py @@ -52,7 +52,7 @@ def interpret(source, frame, should_fail if should_fail: return ("(assertion failed, but when it was re-run for " "printing intermediate values, it did not fail. 
Suggestions: " - "compute assert expression before the assert or use --nomagic)") + "compute assert expression before the assert or use --no-assert)") def run(offending_line, frame=None): if frame is None: --- a/py/plugin/pytest_helpconfig.py +++ b/py/plugin/pytest_helpconfig.py @@ -5,10 +5,13 @@ import inspect, sys def pytest_addoption(parser): group = parser.getgroup('debugconfig') + group.addoption('--version', action="store_true", + help="display py lib version and import information.") + group._addoption('-p', action="append", dest="plugins", default = [], + metavar="name", + help="early-load given plugin (multi-allowed).") group.addoption("--help-config", action="store_true", dest="helpconfig", help="show available conftest.py and ENV-variable names.") - group.addoption('--version', action="store_true", - help="display py lib version and import information.") def pytest_configure(__multicall__, config): if config.option.version: --- a/py/plugin/pytest_resultlog.py +++ b/py/plugin/pytest_resultlog.py @@ -1,5 +1,9 @@ -"""resultlog plugin for machine-readable logging of test results. - Useful for buildbot integration code. +"""non-xml machine-readable logging of test results. + Useful for buildbot integration code. See the `PyPy-test`_ + web page for post-processing. + +.. _`PyPy-test`: http://codespeak.net:8099/summary + """ import py --- a/py/plugin/pytest_figleaf.py +++ b/py/plugin/pytest_figleaf.py @@ -1,6 +1,7 @@ """ -write and report coverage data with 'figleaf'. +report python test coverage using the 'figleaf' package. +Not enabled by default (use "-p" or conftest settings to do so). """ import py py.test.importorskip("figleaf") --- a/doc/test/plugin/figleaf.txt +++ b/doc/test/plugin/figleaf.txt @@ -2,12 +2,12 @@ pytest_figleaf plugin ===================== -write and report coverage data with 'figleaf'. +report python test coverage using the 'figleaf' package. .. contents:: :local: - +Not enabled by default (use "-p" or conftest settings to do so). command line options -------------------- @@ -18,7 +18,7 @@ command line options ``--fig-data=dir`` set tracing file, default: ".figleaf". ``--fig-html=dir`` - set html reporting dir, default "html"). + set html reporting dir, default "html". Start improving this plugin in 30 seconds ========================================= --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -1,42 +1,46 @@ -.. _`helpconfig`: helpconfig.html +.. _`pytest_logxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_logxml.py .. _`terminal`: terminal.html -.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_recwarn.py +.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_recwarn.py .. _`unittest`: unittest.html -.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_monkeypatch.py +.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_monkeypatch.py +.. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_genscript.py .. _`pastebin`: pastebin.html .. _`skipping`: skipping.html +.. _`genscript`: genscript.html .. _`plugins`: index.html .. _`mark`: mark.html .. _`tmpdir`: tmpdir.html -.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_doctest.py +.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_doctest.py .. _`capture`: capture.html -.. 
_`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_nose.py -.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_restdoc.py +.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_nose.py +.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html -.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_pastebin.py -.. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_tmpdir.py -.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_figleaf.py -.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_hooklog.py -.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_skipping.py +.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_pastebin.py +.. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_tmpdir.py +.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_figleaf.py +.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_hooklog.py +.. _`logxml`: logxml.html +.. _`helpconfig`: helpconfig.html +.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_skipping.py .. _`checkout the py.test development version`: ../../install.html#checkout -.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_helpconfig.py +.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_helpconfig.py .. _`oejskit`: oejskit.html .. _`doctest`: doctest.html -.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_mark.py +.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_mark.py .. _`get in contact`: ../../contact.html -.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_capture.py +.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_capture.py .. _`figleaf`: figleaf.html .. _`customize`: ../customize.html .. _`hooklog`: hooklog.html -.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_terminal.py +.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_terminal.py .. _`recwarn`: recwarn.html -.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_pdb.py +.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_pdb.py .. _`monkeypatch`: monkeypatch.html .. _`coverage`: coverage.html .. _`resultlog`: resultlog.html .. _`django`: django.html .. _`xmlresult`: xmlresult.html -.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_unittest.py +.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_unittest.py .. _`nose`: nose.html -.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.1.1post1/py/plugin/pytest_resultlog.py +.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_resultlog.py .. 
_`pdb`: pdb.html --- a/py/plugin/pytest_genscript.py +++ b/py/plugin/pytest_genscript.py @@ -1,4 +1,7 @@ #! /usr/bin/env python +""" +generate standalone test script to be distributed along with an application. +""" import os import zlib @@ -10,7 +13,7 @@ except Importerror: import cPickle as pickle def pytest_addoption(parser): - group = parser.getgroup("general") + group = parser.getgroup("debugconfig") group.addoption("--genscript", action="store", default=None, dest="genscript", metavar="path", help="create standalone py.test script at given target path.") --- a/py/plugin/pytest_logxml.py +++ b/py/plugin/pytest_logxml.py @@ -1,13 +1,13 @@ """ - logxml plugin for machine-readable logging of test results. - Based on initial code from Ross Lawley. + logging of test results in JUnit-XML format, for use with Hudson + and build integration servers. Based on initial code from Ross Lawley. """ import py import time def pytest_addoption(parser): - group = parser.getgroup("general") + group = parser.getgroup("terminal reporting") group.addoption('--xml', action="store", dest="xmlpath", metavar="path", default=None, help="create junit-xml style report file at the given path.") --- a/py/plugin/pytest_capture.py +++ b/py/plugin/pytest_capture.py @@ -91,7 +91,7 @@ def pytest_addoption(parser): group = parser.getgroup("general") group._addoption('--capture', action="store", default=None, metavar="method", type="choice", choices=['fd', 'sys', 'no'], - help="set capturing method during tests: fd (default)|sys|no.") + help="per-test capturing method: one of fd (default)|sys|no.") group._addoption('-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") --- a/py/plugin/pytest_runner.py +++ b/py/plugin/pytest_runner.py @@ -12,7 +12,7 @@ def pytest_addoption(parser): group = parser.getgroup("general") group.addoption('--boxed', action="store_true", dest="boxed", default=False, - help="box each test run in a separate process") + help="box each test run in a separate process (unix)") # XXX move to pytest_sessionstart and fix py.test owns tests def pytest_configure(config): --- a/doc/test/plugin/index.txt +++ b/doc/test/plugin/index.txt @@ -8,7 +8,7 @@ mark_ generic mechanism for marking pyth pdb_ interactive debugging with the Python Debugger. -figleaf_ write and report coverage data with 'figleaf'. +figleaf_ report python test coverage using the 'figleaf' package. coverage_ (3rd) for testing with Ned's coverage module @@ -28,15 +28,19 @@ oejskit_ (3rd) run javascript tests in r django_ (3rd) for testing django applications +genscript_ generate standalone test script to be distributed along with an application. + reporting and failure logging ============================= pastebin_ submit failure or test session information to a pastebin service. +logxml_ logging of test results in JUnit-XML format, for use with Hudson + xmlresult_ (3rd) for generating xml reports and CruiseControl integration -resultlog_ resultlog plugin for machine-readable logging of test results. +resultlog_ non-xml machine-readable logging of test results. terminal_ Implements terminal reporting of the full testing process. 
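
The regrouping above all goes through the same parser.getgroup()/addoption() API: a plugin asks the parser for a named group (supplying a display title when creating it) and attaches its options there, which is what decides the heading they appear under in "py.test -h". A minimal sketch of that usage follows -- the group names mirror the patches above, while the "--demo-report" option, its dest and the plugin itself are invented purely for illustration::

    def pytest_addoption(parser):
        # fetch (or create) the "reporting" group used by the terminal plugin above
        group = parser.getgroup("terminal reporting", "reporting")
        # hypothetical option name and dest, shown only to illustrate the call
        group.addoption('--demo-report', action="store", dest="demoreport",
            default=None, metavar="opts",
            help="hypothetical option, listed under 'reporting' in py.test -h")

    def pytest_configure(config):
        # parsed values appear on config.option under the 'dest' name
        if config.option.demoreport:
            print("demo report options: %s" % config.option.demoreport)
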
--- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -7,9 +7,9 @@ plugins = [ 'skipping mark pdb figleaf coverage ' 'monkeypatch capture recwarn tmpdir',), ('testing domains', - 'oejskit django'), + 'oejskit django genscript'), ('reporting and failure logging', - 'pastebin xmlresult resultlog terminal',), + 'pastebin logxml xmlresult resultlog terminal',), ('other testing conventions', 'unittest nose doctest restdoc'), ('core debugging / help functionality', --- a/py/plugin/pytest_terminal.py +++ b/py/plugin/pytest_terminal.py @@ -7,7 +7,7 @@ import py import sys def pytest_addoption(parser): - group = parser.getgroup("terminal reporting", after="general") + group = parser.getgroup("terminal reporting", "reporting", after="general") group._addoption('-v', '--verbose', action="count", dest="verbose", default=0, help="increase verbosity."), group._addoption('-l', '--showlocals', @@ -15,7 +15,7 @@ def pytest_addoption(parser): help="show locals in tracebacks (disabled by default).") group.addoption('--report', action="store", dest="report", default=None, metavar="opts", - help="comma separated options, valid: skipped,xfailed") + help="show more info, valid: skipped,xfailed") group._addoption('--tb', metavar="style", action="store", dest="tbstyle", default='long', type="choice", choices=['long', 'short', 'no'], @@ -25,9 +25,6 @@ def pytest_addoption(parser): help="don't cut any tracebacks (default is to cut).") group = parser.getgroup("debugconfig") - group.addoption('--collectonly', - action="store_true", dest="collectonly", - help="only collect tests, don't execute them."), group.addoption('--traceconfig', action="store_true", dest="traceconfig", default=False, help="trace considerations of conftest.py files."), --- a/py/plugin/pytest_pastebin.py +++ b/py/plugin/pytest_pastebin.py @@ -27,7 +27,7 @@ class url: show = base + "/show/" def pytest_addoption(parser): - group = parser.getgroup("general") + group = parser.getgroup("terminal reporting") group._addoption('--pastebin', metavar="mode", action='store', dest="pastebin", default=None, type="choice", choices=['failed', 'all'], --- a/py/plugin/pytest_doctest.py +++ b/py/plugin/pytest_doctest.py @@ -27,7 +27,7 @@ from py.impl.code.code import TerminalRe import doctest def pytest_addoption(parser): - group = parser.getgroup("general") + group = parser.getgroup("collect") group.addoption("--doctest-modules", action="store_true", default=False, help="run doctests in all .py modules", From commits-noreply at bitbucket.org Sun Jan 3 13:27:22 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 3 Jan 2010 12:27:22 +0000 (UTC) Subject: [py-svn] py-trunk commit 82eaef6c64de: fix python3 issues, add missing plugin docs Message-ID: <20100103122722.AABBA7EF28@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262521626 -3600 # Node ID 82eaef6c64de1ca5baf7e68d55bc8ac7b742e718 # Parent 472d0a7c413644069c025de0923d9719027c4326 fix python3 issues, add missing plugin docs --- a/testing/plugin/test_pytest_restdoc.py +++ b/testing/plugin/test_pytest_restdoc.py @@ -1,3 +1,4 @@ +import py from py.plugin.pytest_restdoc import deindent def test_deindent(): @@ -9,6 +10,9 @@ def test_deindent(): assert deindent(' foo\n bar\n') == ' foo\nbar\n' class TestDoctest: + def setup_class(cls): + py.test.importorskip("docutils") + def pytest_funcarg__testdir(self, request): testdir = 
request.getfuncargvalue("testdir") testdir.plugins.append("restdoc") --- a/py/__init__.py +++ b/py/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ py.test and pylib: rapid testing and development utils --- /dev/null +++ b/doc/test/plugin/genscript.txt @@ -0,0 +1,29 @@ + +pytest_genscript plugin +======================= + +generate standalone test script to be distributed along with an application. + +.. contents:: + :local: + + + +command line options +-------------------- + + +``--genscript=path`` + create standalone py.test script at given target path. + +Start improving this plugin in 30 seconds +========================================= + + +1. Download `pytest_genscript.py`_ plugin source code +2. put it somewhere as ``pytest_genscript.py`` into your import path +3. a subsequent ``py.test`` run will use your local version + +Checkout customize_, other plugins_ or `get in contact`_. + +.. include:: links.txt --- /dev/null +++ b/doc/test/plugin/logxml.txt @@ -0,0 +1,29 @@ + +pytest_logxml plugin +==================== + +logging of test results in JUnit-XML format, for use with Hudson + +.. contents:: + :local: + +and build integration servers. Based on initial code from Ross Lawley. + +command line options +-------------------- + + +``--xml=path`` + create junit-xml style report file at the given path. + +Start improving this plugin in 30 seconds +========================================= + + +1. Download `pytest_logxml.py`_ plugin source code +2. put it somewhere as ``pytest_logxml.py`` into your import path +3. a subsequent ``py.test`` run will use your local version + +Checkout customize_, other plugins_ or `get in contact`_. + +.. include:: links.txt --- a/testing/path/conftest.py +++ b/testing/path/conftest.py @@ -17,7 +17,7 @@ def pytest_funcarg__repowc1(request): ) for x in ('test_remove', 'test_move', 'test_status_deleted'): if request.function.__name__.startswith(x): - print >>sys.stderr, ("saving repo", repo, "for", request.function) + #print >>sys.stderr, ("saving repo", repo, "for", request.function) _savedrepowc = save_repowc(repo, wc) request.addfinalizer(lambda: restore_repowc(_savedrepowc)) return repo, repourl, wc @@ -67,7 +67,7 @@ def save_repowc(repo, wc): def restore_repowc(obj): savedrepo, savedwc = obj - print >>sys.stderr, ("restoring", savedrepo) + #print >>sys.stderr, ("restoring", savedrepo) repo = savedrepo.new(basename=savedrepo.basename[:-2]) assert repo.check() wc = savedwc.new(basename=savedwc.basename[:-2]) --- a/py/plugin/pytest_genscript.py +++ b/py/plugin/pytest_genscript.py @@ -44,16 +44,17 @@ def main(pybasedir, outfile, infile): name2src = {} for f in files: k = f.replace(os.sep, ".")[:-3] - name2src[k] = open(f, "rb").read() + name2src[k] = open(f, "r").read() data = pickle.dumps(name2src, 2) data = zlib.compress(data, 9) data = base64.encodestring(data) + data = data.decode("ascii") - exe = open(infile, "rb").read() + exe = open(infile, "r").read() exe = exe.replace("@SOURCES@", data) - open(outfile, "wb").write(exe) + open(outfile, "w").write(exe) os.chmod(outfile, 493) # 0755 sys.stdout.write("generated standalone py.test at %r, have fun!\n" % outfile) From commits-noreply at bitbucket.org Sun Jan 3 14:19:53 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 3 Jan 2010 13:19:53 +0000 (UTC) Subject: [py-svn] py-trunk commit bf69999b6108: relax a test to pass on jython and fix install docs to include genscript standalone usage. 
Message-ID: <20100103131953.0B01E7EF2E@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262524771 -3600 # Node ID bf69999b61082769e94ef1cd4ad6590474e99061 # Parent 82eaef6c64de1ca5baf7e68d55bc8ac7b742e718 relax a test to pass on jython and fix install docs to include genscript standalone usage. --- a/bin-for-dist/test_install.py +++ b/bin-for-dist/test_install.py @@ -166,7 +166,8 @@ def test_cmdline_entrypoints(monkeypatch expected = "%s-jython" % script assert expected in points for script in unversioned_scripts: - assert script in points + assert script not in points + points = cmdline_entrypoints((2,5,1), "xyz", 'pypy-c-XYZ') for script in versioned_scripts: expected = "%s-pypy-c-XYZ" % script --- a/setup.py +++ b/setup.py @@ -63,20 +63,21 @@ def main(): ) def cmdline_entrypoints(versioninfo, platform, basename): - if basename.startswith("pypy"): - points = {'py.test-%s' % basename: 'py.cmdline:pytest', - 'py.which-%s' % basename: 'py.cmdline:pywhich',} - elif platform.startswith('java'): + if platform.startswith('java'): points = {'py.test-jython': 'py.cmdline:pytest', 'py.which-jython': 'py.cmdline:pywhich'} - else: # cpython - points = { - 'py.test-%s.%s' % versioninfo[:2] : 'py.cmdline:pytest', - 'py.which-%s.%s' % versioninfo[:2] : 'py.cmdline:pywhich' - } - for x in ['py.cleanup', 'py.convert_unittest', 'py.countloc', - 'py.lookup', 'py.svnwcrevert', 'py.which', 'py.test']: - points[x] = "py.cmdline:%s" % x.replace('.','') + else: + if basename.startswith("pypy"): + points = {'py.test-%s' % basename: 'py.cmdline:pytest', + 'py.which-%s' % basename: 'py.cmdline:pywhich',} + else: # cpython + points = { + 'py.test-%s.%s' % versioninfo[:2] : 'py.cmdline:pytest', + 'py.which-%s.%s' % versioninfo[:2] : 'py.cmdline:pywhich' + } + for x in ['py.cleanup', 'py.convert_unittest', 'py.countloc', + 'py.lookup', 'py.svnwcrevert', 'py.which', 'py.test']: + points[x] = "py.cmdline:%s" % x.replace('.','') return points def make_entry_points(): --- a/doc/install.txt +++ b/doc/install.txt @@ -1,11 +1,6 @@ -.. - ============== - Downloading - ============== .. _`index page`: http://pypi.python.org/pypi/py/ - py.test/pylib installation info in a nutshell =================================================== @@ -15,7 +10,7 @@ py.test/pylib installation info in a nut **Requirements**: setuptools_ or Distribute_ -**Installers**: easy_install_ and pip_ +**Installers**: easy_install_ and pip_ or `standalone`_ (new for 1.2) **Distribution names**: @@ -24,7 +19,7 @@ py.test/pylib installation info in a nut * debian: ``python-codespeak-lib`` * gentoo: ``pylib`` -**Installed scripts**: see `bin`_ for which scripts are installed. +**Installed scripts**: see `bin`_ for which and how scripts are installed. .. _`bin`: bin.html @@ -68,6 +63,25 @@ Maybe you want to head on with the `quic .. _quickstart: test/quickstart.html +.. _standalone: + +Generating a py.test standalone Script +============================================ + +If you are a maintainer or application developer and want users +to run tests you can use a facility to generate a standalone +"py.test" script that you can tell users to run:: + + py.test --genscript=mytest + +will generate a ``mytest`` script that is, in fact, a ``py.test`` under +disguise. You can tell people to download and then e.g. run it like this:: + + python mytest --pastebin=all + +and ask them to send you the resulting URL. 
The resulting script has +all core features and runs unchanged under Python2 and Python3 interpreters. + Troubleshooting ======================== --- a/testing/pytest/acceptance_test.py +++ b/testing/pytest/acceptance_test.py @@ -61,7 +61,7 @@ class TestGeneralUsage: testdir.makepyfile(import_fails="import does_not_work") result = testdir.runpytest(p) extra = result.stdout.fnmatch_lines([ - "> import import_fails", + #XXX on jython this fails: "> import import_fails", "E ImportError: No module named does_not_work", ]) assert result.ret == 1 From commits-noreply at bitbucket.org Sun Jan 3 14:33:20 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 3 Jan 2010 13:33:20 +0000 (UTC) Subject: [py-svn] py-trunk commit 53540f84f1fb: update issues, version numbers Message-ID: <20100103133320.609A47EE7B@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262525590 -3600 # Node ID 53540f84f1fb6ad010642249252ac83a406cacf1 # Parent bf69999b61082769e94ef1cd4ad6590474e99061 update issues, version numbers --- a/ISSUES.txt +++ b/ISSUES.txt @@ -78,19 +78,6 @@ but a remote one fail because the tests does not contain an "__init__.py". Either give an error or make it work without the __init__.py -introduce a "RootCollector" ----------------------------------------------------------------- -tags: feature 1.2 - -Currently the top collector is a Directory node and -there also is the notion of a "topdir". See to refine -internal handling such that there is a RootCollector -which holds this topdir (or do away with topdirs?). -Make sure this leads to an improvement in how -tests are shown in hudson which currently sometimes -shows "workspace" and sometimes not as the leading -name. - deprecate ensuretemp / introduce funcargs to setup method -------------------------------------------------------------- tags: experimental-wish 1.2 --- a/py/apipkg.py +++ b/py/apipkg.py @@ -8,7 +8,7 @@ see http://pypi.python.org/pypi/apipkg import sys from types import ModuleType -__version__ = "1.0b3" +__version__ = "1.0b4" def initpkg(pkgname, exportdefs): """ initialize given package from the export definitions. """ From commits-noreply at bitbucket.org Sun Jan 3 18:20:06 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 3 Jan 2010 17:20:06 +0000 (UTC) Subject: [py-svn] py-trunk commit e9df7d915d89: add potential feature from py-dev discussion Message-ID: <20100103172006.101BB7EF2E@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262526746 -3600 # Node ID e9df7d915d89d04ba6a0be0c294b0ff044236695 # Parent 53540f84f1fb6ad010642249252ac83a406cacf1 add potential feature from py-dev discussion --- a/ISSUES.txt +++ b/ISSUES.txt @@ -94,3 +94,14 @@ tags: 1.2 Packages with external dependencies should be moved out of the core distribution. Also figleaf could serve as another prototype for an external plugin. + +consider pytest_addsyspath hook +----------------------------------------- +tags: 1.2 + +py.test could call a new pytest_addsyspath() in order to systematically +allow manipulation of sys.path and to inhibit it via --no-addsyspath +in order to more easily run against installed packages. + +Alternatively it could also be done via the config object +and pytest_configure. 
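
For illustration only: the pytest_addsyspath idea above is no more than a proposal in ISSUES.txt, so neither the hook nor a --no-addsyspath option exists in py-trunk at this point. A sketch of what a conftest.py might do if the hook (or the pytest_configure alternative mentioned in the entry) were adopted, using only the config.topdir attribute already present in these patches::

    import sys

    def pytest_addsyspath(config):
        # hypothetical hook from the ISSUES entry: prepend the project root
        # so tests import the in-tree package rather than an installed one
        sys.path.insert(0, str(config.topdir))

    def pytest_configure(config):
        # the alternative the entry mentions: achieve the same from pytest_configure
        sys.path.insert(0, str(config.topdir))
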
From commits-noreply at bitbucket.org Sun Jan 3 18:20:07 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 3 Jan 2010 17:20:07 +0000 (UTC) Subject: [py-svn] py-trunk commit 63aaeba61e94: avoid dependency on directory ordering Message-ID: <20100103172007.D790E7EE7A@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1262539192 -3600 # Node ID 63aaeba61e94b4fac8f53964cdc1ba25794626e0 # Parent e9df7d915d89d04ba6a0be0c294b0ff044236695 avoid dependency on directory ordering --- a/testing/path/test_local.py +++ b/testing/path/test_local.py @@ -188,9 +188,9 @@ class TestLocalPath(common.CommonFSTests p2 = tmpdir.ensure("b","2") p3 = tmpdir.ensure("breadth") l = list(tmpdir.visit(lambda x: x.check(file=1))) - assert l[0] == p1 - assert l[1] == p2 - assert l[2] == p3 + assert len(l) == 3 + # check that breadth comes last + assert l[2] == p3 class TestExecutionOnWindows: pytestmark = py.test.mark.skipif("sys.platform != 'win32'") From commits-noreply at bitbucket.org Sun Jan 10 13:44:10 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 10 Jan 2010 12:44:10 +0000 (UTC) Subject: [py-svn] apipkg commit 9e411e7c844a: add an alias of __getattr__ to __makeattr to deal with bpythons sandboxing on introspection Message-ID: <20100110124410.C3D6D7EF26@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User Ronny Pfannschmidt # Date 1262865258 -3600 # Node ID 9e411e7c844a86a4ebe16786fc9f6f1964dd2da9 # Parent b517c03e87afaf52144479d7affb74fd3f512d4c add an alias of __getattr__ to __makeattr to deal with bpythons sandboxing on introspection --- a/apipkg.py +++ b/apipkg.py @@ -54,7 +54,15 @@ class ApiModule(ModuleType): return '' % (self.__name__, " ".join(l)) return '' % (self.__name__,) - def __getattr__(self, name): + def __makeattr(self, name): + ''' + load the attribute `name` + assign it to self and return is + + also aliased to __getattr__ + the name __makeattr__ is used + ''' + target = None if '__onfirstaccess__' in self.__map__: target = self.__map__.pop('__onfirstaccess__') @@ -72,6 +80,8 @@ class ApiModule(ModuleType): del self.__map__[name] return result + __getattr__ = __makeattr # support getattr by aliasing + def __dict__(self): # force all the content of the module to be loaded when __dict__ is read dictdescr = ModuleType.__dict__['__dict__'] @@ -79,6 +89,10 @@ class ApiModule(ModuleType): if dict is not None: hasattr(self, 'some') for name in self.__all__: - hasattr(self, name) # force attribute load, ignore errors + # force attribute load, ignore errors + try: + self.__makeattr(name) + except AttributeError: + pass return dict __dict__ = property(__dict__) From commits-noreply at bitbucket.org Sun Jan 10 13:44:12 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 10 Jan 2010 12:44:12 +0000 (UTC) Subject: [py-svn] apipkg commit 5fc11da5b2f6: add a test for ronny's bpython '__makeattr' work around Message-ID: <20100110124412.A4AEF7EF2A@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1263127131 -3600 # Node ID 5fc11da5b2f62f4bd225672b3f01b4ff3747afb3 # Parent 9e411e7c844a86a4ebe16786fc9f6f1964dd2da9 add a test for ronny's bpython '__makeattr' work around --- a/test_apipkg.py +++ b/test_apipkg.py @@ -259,3 +259,14 
@@ def test_onfirstaccess_setsnewattr(tmpdi assert not hasattr(mod, '__onfirstaccess__') assert not hasattr(mod, '__onfirstaccess__') assert '__onfirstaccess__' not in vars(mod) + +def test_bpython_getattr_override(tmpdir, monkeypatch): + def patchgetattr(self, name): + raise AttributeError(name) + monkeypatch.setattr(apipkg.ApiModule, '__getattr__', patchgetattr) + api = apipkg.ApiModule('bpy', { + 'abspath': 'os.path:abspath', + }) + d = api.__dict__ + assert 'abspath' in d + --- a/apipkg.py +++ b/apipkg.py @@ -55,14 +55,7 @@ class ApiModule(ModuleType): return '' % (self.__name__,) def __makeattr(self, name): - ''' - load the attribute `name` - assign it to self and return is - - also aliased to __getattr__ - the name __makeattr__ is used - ''' - + """lazily compute value for name or raise AttributeError if unknown.""" target = None if '__onfirstaccess__' in self.__map__: target = self.__map__.pop('__onfirstaccess__') @@ -80,7 +73,7 @@ class ApiModule(ModuleType): del self.__map__[name] return result - __getattr__ = __makeattr # support getattr by aliasing + __getattr__ = __makeattr def __dict__(self): # force all the content of the module to be loaded when __dict__ is read @@ -89,7 +82,6 @@ class ApiModule(ModuleType): if dict is not None: hasattr(self, 'some') for name in self.__all__: - # force attribute load, ignore errors try: self.__makeattr(name) except AttributeError: From commits-noreply at bitbucket.org Sun Jan 10 13:44:14 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 10 Jan 2010 12:44:14 +0000 (UTC) Subject: [py-svn] apipkg commit c76615baa4a2: also transfer __loader__ attribute (thanks ralf schmitt), bump version Message-ID: <20100110124414.865EE7EF2E@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1262525013 -3600 # Node ID c76615baa4a26f89adb1c52b26f401b35001006d # Parent 5fc11da5b2f62f4bd225672b3f01b4ff3747afb3 also transfer __loader__ attribute (thanks ralf schmitt), bump version --- a/test_apipkg.py +++ b/test_apipkg.py @@ -165,16 +165,18 @@ def test_initpkg_replaces_sysmodules(mon assert newmod != mod assert newmod.x == py.std.os.path.abspath -def test_initpkg_transfers_version_and_file(monkeypatch): +def test_initpkg_transfers_attrs(monkeypatch): mod = type(sys)('hello') mod.__version__ = 10 mod.__file__ = "hello.py" + mod.__loader__ = "loader" monkeypatch.setitem(sys.modules, 'hello', mod) apipkg.initpkg('hello', {}) newmod = sys.modules['hello'] assert newmod != mod assert newmod.__file__ == mod.__file__ assert newmod.__version__ == mod.__version__ + assert newmod.__loader__ == mod.__loader__ def test_initpkg_defaults(monkeypatch): mod = type(sys)('hello') --- a/apipkg.py +++ b/apipkg.py @@ -8,7 +8,7 @@ see http://pypi.python.org/pypi/apipkg import sys from types import ModuleType -__version__ = "1.0b3" +__version__ = "1.0b4" def initpkg(pkgname, exportdefs): """ initialize given package from the export definitions. """ @@ -17,6 +17,7 @@ def initpkg(pkgname, exportdefs): mod.__file__ = getattr(oldmod, '__file__', None) mod.__version__ = getattr(oldmod, '__version__', None) mod.__path__ = getattr(oldmod, '__path__', None) + mod.__loader__ = getattr(oldmod, '__loader__', None) sys.modules[pkgname] = mod def importobj(modpath, attrname): --- a/readme.txt +++ b/readme.txt @@ -5,9 +5,11 @@ With apipkg you can control the exported python package and greatly reduce the number of imports for your users. 
It is a `small pure python module`_ that works on virtually all Python versions, including CPython2.3 to Python3.1, Jython and PyPy. It co-operates -well with Python's ``help()`` system and common command line completion -tools. Usage is very simple: you can require 'apipkg' as a dependency -or you can copy paste the <100 Lines of code into your project. +well with Python's ``help()`` system, custom importers (PEP302) and common +command line completion tools. + +Usage is very simple: you can require 'apipkg' as a dependency or you +can copy paste the <100 Lines of code into your project. Tutorial example ------------------- From commits-noreply at bitbucket.org Sun Jan 10 13:57:39 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 10 Jan 2010 12:57:39 +0000 (UTC) Subject: [py-svn] py-trunk commit 92f94395f31c: porting latest apipkg Message-ID: <20100110125739.61EB17EF16@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263128095 -3600 # Node ID 92f94395f31c1e99a0db0300599e26f130b192d5 # Parent 63aaeba61e94b4fac8f53964cdc1ba25794626e0 porting latest apipkg --- a/py/apipkg.py +++ b/py/apipkg.py @@ -55,7 +55,8 @@ class ApiModule(ModuleType): return '' % (self.__name__, " ".join(l)) return '' % (self.__name__,) - def __getattr__(self, name): + def __makeattr(self, name): + """lazily compute value for name or raise AttributeError if unknown.""" target = None if '__onfirstaccess__' in self.__map__: target = self.__map__.pop('__onfirstaccess__') @@ -73,6 +74,8 @@ class ApiModule(ModuleType): del self.__map__[name] return result + __getattr__ = __makeattr + def __dict__(self): # force all the content of the module to be loaded when __dict__ is read dictdescr = ModuleType.__dict__['__dict__'] @@ -80,6 +83,9 @@ class ApiModule(ModuleType): if dict is not None: hasattr(self, 'some') for name in self.__all__: - hasattr(self, name) # force attribute load, ignore errors + try: + self.__makeattr(name) + except AttributeError: + pass return dict __dict__ = property(__dict__) From commits-noreply at bitbucket.org Sun Jan 10 21:50:01 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 10 Jan 2010 20:50:01 +0000 (UTC) Subject: [py-svn] py-trunk commit c85b3b8db30c: fix some "import py" test issues, and prevent "genscript" script from having dist-options Message-ID: <20100110205001.0B15C7EF12@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263152737 -3600 # Node ID c85b3b8db30cd5d814ede4c34cc3694dd77b1095 # Parent 92f94395f31c1e99a0db0300599e26f130b192d5 fix some "import py" test issues, and prevent "genscript" script from having dist-options --- a/testing/plugin/test_pytest_capture.py +++ b/testing/plugin/test_pytest_capture.py @@ -250,10 +250,8 @@ class TestLoggingInteraction: def test_capturing_and_logging_fundamentals(self, testdir): # here we check a fundamental feature - rootdir = str(py.path.local(py.__file__).dirpath().dirpath()) p = testdir.makepyfile(""" import sys, os - sys.path.insert(0, %r) import py, logging if hasattr(os, 'dup'): cap = py.io.StdCaptureFD(out=False, in_=False) @@ -262,7 +260,7 @@ class TestLoggingInteraction: logging.warn("hello1") outerr = cap.suspend() - print ("suspeneded and captured %%s" %% (outerr,)) + print ("suspeneded and captured %s" % (outerr,)) logging.warn("hello2") @@ -270,8 +268,8 @@ 
class TestLoggingInteraction: logging.warn("hello3") outerr = cap.suspend() - print ("suspend2 and captured %%s" %% (outerr,)) - """ % rootdir) + print ("suspend2 and captured %s" % (outerr,)) + """) result = testdir.runpython(p) assert result.stdout.fnmatch_lines([ "suspeneded and captured*hello1*", --- a/py/plugin/pytest_default.py +++ b/py/plugin/pytest_default.py @@ -5,6 +5,8 @@ import py try: import execnet + if not py.path.local(py.__file__).check(): + raise ImportError("") except ImportError: execnet = None else: --- a/testing/pytest/acceptance_test.py +++ b/testing/pytest/acceptance_test.py @@ -81,11 +81,11 @@ class TestGeneralUsage: import py assert hasattr(py.test, 'mark') """) - result = testdir._run(sys.executable, p) + result = testdir.runpython(p) assert result.ret == 0 def test_pydoc(self, testdir): - result = testdir._run(sys.executable, "-c", "import py ; help(py.test)") + result = testdir.runpython_c("import py ; help(py.test)") assert result.ret == 0 s = result.stdout.str() assert 'MarkGenerator' in s --- a/testing/plugin/test_pytest_genscript.py +++ b/testing/plugin/test_pytest_genscript.py @@ -31,7 +31,7 @@ def test_rundist(testdir, standalone): pass """) result = standalone.run(sys.executable, testdir, '-n', '3') - assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*1 passed*" + assert result.ret == 2 + result.stderr.fnmatch_lines([ + "*no such option*" ]) --- a/py/plugin/pytest_pytester.py +++ b/py/plugin/pytest_pytester.py @@ -321,7 +321,21 @@ class TmpTestdir: return (sys.executable, "-c", source,) def runpython(self, script): - return self.run(py.std.sys.executable, script) + s = self._getsysprepend() + if s: + script.write(s + "\n" + script.read()) + return self.run(sys.executable, script) + + def _getsysprepend(self): + if not self.request.config.getvalue("toolsonpath"): + s = "import sys ; sys.path.insert(0, %r) ; " % str(py._dir.dirpath()) + else: + s = "" + return s + + def runpython_c(self, command): + command = self._getsysprepend() + command + return self.run(py.std.sys.executable, "-c", command) def runpytest(self, *args): p = py.path.local.make_numbered_dir(prefix="runpytest-", From commits-noreply at bitbucket.org Sun Jan 10 21:50:03 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 10 Jan 2010 20:50:03 +0000 (UTC) Subject: [py-svn] py-trunk commit 25741d5e3df1: (experimental) allow cmdline arguments to deep-point to a test, also remove virtually redundant session.getinitialitems() calls Message-ID: <20100110205003.C54127EF14@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263155376 -3600 # Node ID 25741d5e3df1db86b6848a0cf1721961b2b0e369 # Parent c85b3b8db30cd5d814ede4c34cc3694dd77b1095 (experimental) allow cmdline arguments to deep-point to a test, also remove virtually redundant session.getinitialitems() calls --- a/py/impl/test/dist/dsession.py +++ b/py/impl/test/dist/dsession.py @@ -85,8 +85,7 @@ class DSession(Session): # raise config.Error("dist mode %r needs test execution environments, " # "none found." 
%(config.option.dist)) - def main(self, colitems=None): - colitems = self.getinitialitems(colitems) + def main(self, colitems): self.sessionstarts() self.setup() exitstatus = self.loop(colitems) --- a/testing/pytest/test_config.py +++ b/testing/pytest/test_config.py @@ -117,28 +117,28 @@ class TestConfigAPI: py.test.raises(ValueError, "config.setsessionclass(Session1)") -class TestConfigApi_getcolitems: - def test_getcolitems_onedir(self, testdir): +class TestConfigApi_getinitialnodes: + def test_onedir(self, testdir): config = testdir.reparseconfig([testdir.tmpdir]) - colitems = config.getcolitems() + colitems = config.getinitialnodes() assert len(colitems) == 1 col = colitems[0] assert isinstance(col, py.test.collect.Directory) for col in col.listchain(): assert col.config is config - def test_getcolitems_twodirs(self, testdir, tmpdir): + def test_twodirs(self, testdir, tmpdir): config = testdir.reparseconfig([tmpdir, tmpdir]) - colitems = config.getcolitems() + colitems = config.getinitialnodes() assert len(colitems) == 2 col1, col2 = colitems assert col1.name == col2.name assert col1.parent == col2.parent - def test_getcolitems_curdir_and_subdir(self, testdir, tmpdir): + def test_curdir_and_subdir(self, testdir, tmpdir): a = tmpdir.ensure("a", dir=1) config = testdir.reparseconfig([tmpdir, a]) - colitems = config.getcolitems() + colitems = config.getinitialnodes() assert len(colitems) == 2 col1, col2 = colitems assert col1.name == tmpdir.basename @@ -147,10 +147,10 @@ class TestConfigApi_getcolitems: for subcol in col.listchain(): assert col.config is config - def test__getcol_global_file(self, testdir, tmpdir): + def test_global_file(self, testdir, tmpdir): x = tmpdir.ensure("x.py") config = testdir.reparseconfig([x]) - col = config.getfsnode(x) + col, = config.getinitialnodes() assert isinstance(col, py.test.collect.Module) assert col.name == 'x.py' assert col.parent.name == tmpdir.basename @@ -158,21 +158,21 @@ class TestConfigApi_getcolitems: for col in col.listchain(): assert col.config is config - def test__getcol_global_dir(self, testdir, tmpdir): + def test_global_dir(self, testdir, tmpdir): x = tmpdir.ensure("a", dir=1) config = testdir.reparseconfig([x]) - col = config.getfsnode(x) + col, = config.getinitialnodes() assert isinstance(col, py.test.collect.Directory) print(col.listchain()) assert col.name == 'a' assert isinstance(col.parent, RootCollector) assert col.config is config - def test__getcol_pkgfile(self, testdir, tmpdir): + def test_pkgfile(self, testdir, tmpdir): x = tmpdir.ensure("x.py") tmpdir.ensure("__init__.py") config = testdir.reparseconfig([x]) - col = config.getfsnode(x) + col, = config.getinitialnodes() assert isinstance(col, py.test.collect.Module) assert col.name == 'x.py' assert col.parent.name == x.dirpath().basename @@ -214,7 +214,9 @@ class TestConfig_gettopdir: Z = tmp.ensure('Z', dir=1) assert gettopdir([c]) == a assert gettopdir([c, Z]) == tmp - + assert gettopdir(["%s::xyc" % c]) == a + assert gettopdir(["%s::xyc::abc" % c]) == a + assert gettopdir(["%s::xyc" % c, "%s::abc" % Z]) == tmp def test_options_on_small_file_do_not_blow_up(testdir): def runfiletest(opts): --- a/py/impl/test/looponfail/remote.py +++ b/py/impl/test/looponfail/remote.py @@ -142,7 +142,7 @@ def slave_runsession(channel, config, fu continue colitems.append(colitem) else: - colitems = None + colitems = config.getinitialnodes() session.shouldclose = channel.isclosed class Failures(list): --- a/testing/pytest/test_collect.py +++ b/testing/pytest/test_collect.py @@ -66,7 +66,7 @@ 
class TestCollector: return MyDirectory(path, parent=parent) """) config = testdir.parseconfig(hello) - node = config.getfsnode(hello) + node = config.getnode(hello) assert isinstance(node, py.test.collect.File) assert node.name == "hello.xxx" names = config._rootcol.totrail(node) @@ -84,7 +84,7 @@ class TestCollectFS: tmpdir.ensure("normal", 'test_found.py') tmpdir.ensure('test_found.py') - col = testdir.parseconfig(tmpdir).getfsnode(tmpdir) + col = testdir.parseconfig(tmpdir).getnode(tmpdir) items = col.collect() names = [x.name for x in items] assert len(items) == 2 @@ -93,7 +93,7 @@ class TestCollectFS: def test_found_certain_testfiles(self, testdir): p1 = testdir.makepyfile(test_found = "pass", found_test="pass") - col = testdir.parseconfig(p1).getfsnode(p1.dirpath()) + col = testdir.parseconfig(p1).getnode(p1.dirpath()) items = col.collect() # Directory collect returns files sorted by name assert len(items) == 2 assert items[1].name == 'test_found.py' @@ -106,7 +106,7 @@ class TestCollectFS: testdir.makepyfile(test_two="hello") p1.dirpath().mkdir("dir2") config = testdir.parseconfig() - col = config.getfsnode(p1.dirpath()) + col = config.getnode(p1.dirpath()) names = [x.name for x in col.collect()] assert names == ["dir1", "dir2", "test_one.py", "test_two.py", "x"] @@ -120,7 +120,7 @@ class TestCollectPluginHookRelay: config = testdir.Config() config.pluginmanager.register(Plugin()) config.parse([tmpdir]) - col = config.getfsnode(tmpdir) + col = config.getnode(tmpdir) testdir.makefile(".abc", "xyz") res = col.collect() assert len(wascalled) == 1 @@ -203,20 +203,36 @@ class TestRootCol: tmpdir.ensure("a", "__init__.py") x = tmpdir.ensure("a", "trail.py") config = testdir.reparseconfig([x]) - col = config.getfsnode(x) + col = config.getnode(x) trail = config._rootcol.totrail(col) col2 = config._rootcol.fromtrail(trail) assert col2 == col def test_totrail_topdir_and_beyond(self, testdir, tmpdir): config = testdir.reparseconfig() - col = config.getfsnode(config.topdir) + col = config.getnode(config.topdir) trail = config._rootcol.totrail(col) col2 = config._rootcol.fromtrail(trail) assert col2.fspath == config.topdir assert len(col2.listchain()) == 1 - py.test.raises(config.Error, "config.getfsnode(config.topdir.dirpath())") - #col3 = config.getfsnode(config.topdir.dirpath()) + py.test.raises(config.Error, "config.getnode(config.topdir.dirpath())") + #col3 = config.getnode(config.topdir.dirpath()) #py.test.raises(ValueError, # "col3._totrail()") + def test_argid(self, testdir, tmpdir): + cfg = testdir.parseconfig() + p = testdir.makepyfile("def test_func(): pass") + item = cfg.getnode("%s::test_func" % p) + assert item.name == "test_func" + + def test_argid_with_method(self, testdir, tmpdir): + cfg = testdir.parseconfig() + p = testdir.makepyfile(""" + class TestClass: + def test_method(self): pass + """) + item = cfg.getnode("%s::TestClass::()::test_method" % p) + assert item.name == "test_method" + item = cfg.getnode("%s::TestClass::test_method" % p) + assert item.name == "test_method" --- a/py/impl/test/collect.py +++ b/py/impl/test/collect.py @@ -383,24 +383,10 @@ class RootCollector(Directory): Directory.__init__(self, config.topdir, parent=None, config=config) self.name = None - def getfsnode(self, path): - path = py.path.local(path) - if not path.check(): - raise self.config.Error("file not found: %s" %(path,)) - topdir = self.config.topdir - if path != topdir and not path.relto(topdir): - raise self.config.Error("path %r is not relative to %r" % - (str(path), str(self.fspath))) - 
# assumtion: pytest's fs-collector tree follows the filesystem tree - basenames = filter(None, path.relto(topdir).split(path.sep)) - try: - return self.getbynames(basenames) - except ValueError: - raise self.config.Error("can't collect: %s" % str(path)) - def getbynames(self, names): current = self.consider(self.config.topdir) - for name in names: + while names: + name = names.pop(0) if name == ".": # special "identity" name continue l = [] @@ -409,8 +395,12 @@ class RootCollector(Directory): l.append(x) elif x.fspath == current.fspath.join(name): l.append(x) + elif x.name == "()": + names.insert(0, name) + l.append(x) + break if not l: - raise ValueError("no node named %r in %r" %(name, current)) + raise ValueError("no node named %r below %r" %(name, current)) current = l[0] return current --- a/py/plugin/pytest_pytester.py +++ b/py/plugin/pytest_pytester.py @@ -158,7 +158,7 @@ class TmpTestdir: config = self.parseconfig(*args) session = config.initsession() rec = self.getreportrecorder(config) - colitems = [config.getfsnode(arg) for arg in config.args] + colitems = [config.getnode(arg) for arg in config.args] items = list(session.genitems(colitems)) return items, rec @@ -190,7 +190,8 @@ class TmpTestdir: config.pluginmanager.do_configure(config) session = config.initsession() reprec = self.getreportrecorder(config) - session.main() + colitems = config.getinitialnodes() + session.main(colitems) config.pluginmanager.do_unconfigure(config) return reprec @@ -251,7 +252,7 @@ class TmpTestdir: def getfscol(self, path, configargs=()): self.config = self.parseconfig(path, *configargs) self.session = self.config.initsession() - return self.config.getfsnode(path) + return self.config.getnode(path) def getmodulecol(self, source, configargs=(), withinit=False): kw = {self.request.function.__name__: py.code.Source(source).strip()} @@ -266,7 +267,7 @@ class TmpTestdir: plugin = self.config.pluginmanager.getplugin("runner") plugin.pytest_configure(config=self.config) - return self.config.getfsnode(path) + return self.config.getnode(path) def popen(self, cmdargs, stdout, stderr, **kw): if not hasattr(py.std, 'subprocess'): --- a/py/impl/test/session.py +++ b/py/impl/test/session.py @@ -93,15 +93,8 @@ class Session(object): exitstatus=exitstatus, ) - def getinitialitems(self, colitems): - if colitems is None: - colitems = [self.config.getfsnode(arg) - for arg in self.config.args] - return colitems - - def main(self, colitems=None): + def main(self, colitems): """ main loop for running tests. 
""" - colitems = self.getinitialitems(colitems) self.shouldstop = False self.sessionstarts() exitstatus = outcome.EXIT_OK --- a/testing/pytest/test_deprecated_api.py +++ b/testing/pytest/test_deprecated_api.py @@ -50,7 +50,7 @@ class TestCollectDeprecated: def check2(self): pass """)) config = testdir.parseconfig(somefile) - dirnode = config.getfsnode(somefile.dirpath()) + dirnode = config.getnode(somefile.dirpath()) colitems = dirnode.collect() w = recwarn.pop(DeprecationWarning) assert w.filename.find("conftest.py") != -1 @@ -174,7 +174,7 @@ class TestCollectDeprecated: """) testme = testdir.makefile('xxx', testme="hello") config = testdir.parseconfig(testme) - col = config.getfsnode(testme) + col = config.getnode(testme) assert col.collect() == [] @@ -236,11 +236,11 @@ class TestDisabled: """) config = testdir.parseconfig() if name == "Directory": - config.getfsnode(testdir.tmpdir) + config.getnode(testdir.tmpdir) elif name in ("Module", "File"): - config.getfsnode(p) + config.getnode(p) else: - fnode = config.getfsnode(p) + fnode = config.getnode(p) recwarn.clear() fnode.collect() w = recwarn.pop(DeprecationWarning) @@ -317,7 +317,7 @@ def test_conftest_non_python_items(recwa testdir.maketxtfile(x="") config = testdir.parseconfig() recwarn.clear() - dircol = config.getfsnode(checkfile.dirpath()) + dircol = config.getnode(checkfile.dirpath()) w = recwarn.pop(DeprecationWarning) assert str(w.message).find("conftest.py") != -1 colitems = dircol.collect() @@ -325,7 +325,7 @@ def test_conftest_non_python_items(recwa assert colitems[0].name == "hello.xxx" assert colitems[0].__class__.__name__ == "CustomItem" - item = config.getfsnode(checkfile) + item = config.getnode(checkfile) assert item.name == "hello.xxx" assert item.__class__.__name__ == "CustomItem" @@ -358,14 +358,14 @@ def test_extra_python_files_and_function """) # check that directory collects "check_" files config = testdir.parseconfig() - col = config.getfsnode(checkfile.dirpath()) + col = config.getnode(checkfile.dirpath()) colitems = col.collect() assert len(colitems) == 1 assert isinstance(colitems[0], py.test.collect.Module) # check that module collects "check_" functions and methods config = testdir.parseconfig(checkfile) - col = config.getfsnode(checkfile) + col = config.getnode(checkfile) assert isinstance(col, py.test.collect.Module) colitems = col.collect() assert len(colitems) == 2 --- a/py/impl/test/cmdline.py +++ b/py/impl/test/cmdline.py @@ -13,7 +13,8 @@ def main(args=None): config.parse(args) config.pluginmanager.do_configure(config) session = config.initsession() - exitstatus = session.main() + colitems = config.getinitialnodes() + exitstatus = session.main(colitems) config.pluginmanager.do_unconfigure(config) raise SystemExit(exitstatus) except config.Error: --- a/testing/pytest/dist/test_dsession.py +++ b/testing/pytest/dist/test_dsession.py @@ -396,7 +396,7 @@ class TestDSession: config = testdir.parseconfig('-d', p1, '--tx=popen') dsession = DSession(config) hookrecorder = testdir.getreportrecorder(config).hookrecorder - dsession.main([config.getfsnode(p1)]) + dsession.main([config.getnode(p1)]) rep = hookrecorder.popcall("pytest_runtest_logreport").report assert rep.passed rep = hookrecorder.popcall("pytest_runtest_logreport").report --- a/testing/pytest/test_pickling.py +++ b/testing/pytest/test_pickling.py @@ -75,8 +75,10 @@ class TestConfigPickling: config2 = Config() config2.__setstate__(config1.__getstate__()) assert config2.topdir == py.path.local() - config2_relpaths = [x.relto(config2.topdir) for x 
in config2.args] - config1_relpaths = [x.relto(config1.topdir) for x in config1.args] + config2_relpaths = [py.path.local(x).relto(config2.topdir) + for x in config2.args] + config1_relpaths = [py.path.local(x).relto(config1.topdir) + for x in config1.args] assert config2_relpaths == config1_relpaths for name, value in config1.option.__dict__.items(): @@ -138,7 +140,7 @@ class TestConfigPickling: testdir.chdir() testdir.makepyfile(hello="def test_x(): pass") config = testdir.parseconfig(tmpdir) - col = config.getfsnode(config.topdir) + col = config.getnode(config.topdir) io = py.io.BytesIO() pickler = pickle.Pickler(io) pickler.dump(col) @@ -152,7 +154,7 @@ class TestConfigPickling: tmpdir = testdir.tmpdir dir1 = tmpdir.ensure("somedir", dir=1) config = testdir.parseconfig() - col = config.getfsnode(config.topdir) + col = config.getnode(config.topdir) col1 = col.join(dir1.basename) assert col1.parent is col io = py.io.BytesIO() --- a/py/plugin/pytest_default.py +++ b/py/plugin/pytest_default.py @@ -26,7 +26,7 @@ def pytest_collect_file(path, parent): ext = path.ext pb = path.purebasename if pb.startswith("test_") or pb.endswith("_test") or \ - path in parent.config.args: + path in parent.config._argfspaths: if ext == ".py": return parent.Module(path, parent=parent) @@ -41,7 +41,7 @@ def pytest_collect_directory(path, paren # define Directory(dir) already if not parent.recfilter(path): # by default special ".cvs", ... # check if cmdline specified this dir or a subdir directly - for arg in parent.config.args: + for arg in parent.config._argfspaths: if path == arg or arg.relto(path): break else: --- a/py/impl/test/config.py +++ b/py/impl/test/config.py @@ -99,7 +99,11 @@ class Config(object): args.append(py.std.os.getcwd()) self.topdir = gettopdir(args) self._rootcol = RootCollector(config=self) - self.args = [py.path.local(x) for x in args] + self._setargs(args) + + def _setargs(self, args): + self.args = list(args) + self._argfspaths = [py.path.local(decodearg(x)[0]) for x in args] # config objects are usually pickled across system # barriers but they contain filesystem paths. 
@@ -121,10 +125,10 @@ class Config(object): self.__init__(topdir=py.path.local()) self._rootcol = RootCollector(config=self) args, cmdlineopts = repr - args = [self.topdir.join(x) for x in args] + args = [str(self.topdir.join(x)) for x in args] self.option = cmdlineopts self._preparse(args) - self.args = args + self._setargs(args) def ensuretemp(self, string, dir=True): return self.getbasetemp().ensure(string, dir=dir) @@ -149,11 +153,26 @@ class Config(object): return py.path.local.make_numbered_dir(prefix=basename + "-", keep=0, rootdir=basetemp, lock_timeout=None) - def getcolitems(self): - return [self.getfsnode(arg) for arg in self.args] + def getinitialnodes(self): + return [self.getnode(arg) for arg in self.args] - def getfsnode(self, path): - return self._rootcol.getfsnode(path) + def getnode(self, arg): + parts = decodearg(arg) + path = py.path.local(parts.pop(0)) + if not path.check(): + raise self.Error("file not found: %s" %(path,)) + topdir = self.topdir + if path != topdir and not path.relto(topdir): + raise self.Error("path %r is not relative to %r" % + (str(path), str(topdir))) + # assumtion: pytest's fs-collector tree follows the filesystem tree + names = filter(None, path.relto(topdir).split(path.sep)) + names.extend(parts) + try: + return self._rootcol.getbynames(names) + except ValueError: + e = py.std.sys.exc_info()[1] + raise self.Error("can't collect: %s\n%s" % (arg, e.args[0])) def _getcollectclass(self, name, path): try: @@ -282,11 +301,11 @@ def gettopdir(args): if the common base dir resides in a python package parent directory of the root package is returned. """ - args = [py.path.local(arg) for arg in args] - p = args and args[0] or None - for x in args[1:]: + fsargs = [py.path.local(decodearg(arg)[0]) for arg in args] + p = fsargs and fsargs[0] or None + for x in fsargs[1:]: p = p.common(x) - assert p, "cannot determine common basedir of %s" %(args,) + assert p, "cannot determine common basedir of %s" %(fsargs,) pkgdir = p.pypkgpath() if pkgdir is None: if p.check(file=1): @@ -295,6 +314,10 @@ def gettopdir(args): else: return pkgdir.dirpath() +def decodearg(arg): + arg = str(arg) + return arg.split("::") + def onpytestaccess(): # it's enough to have our containing module loaded as # it initializes a per-process config instance From commits-noreply at bitbucket.org Sun Jan 10 22:25:37 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 10 Jan 2010 21:25:37 +0000 (UTC) Subject: [py-svn] py-trunk commit d63f118dd086: add to changelog, remove docstring Message-ID: <20100110212537.977087EEF4@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263157845 -3600 # Node ID d63f118dd086fcfa8b867fd21ee2ec1ec82a6790 # Parent 25741d5e3df1db86b6848a0cf1721961b2b0e369 add to changelog, remove docstring --- a/setup.py +++ b/setup.py @@ -1,4 +1,3 @@ -"""py lib / py.test setup.py file""" import os, sys if sys.version_info >= (3,0): from distribute_setup import use_setuptools @@ -16,7 +15,7 @@ Platforms: Linux, Win32, OSX Interpreters: Python versions 2.4 through to 3.1, Jython 2.5.1. For questions please check out http://pylib.org/contact.html -.. _`py.test`: http://pylib.org/test.html +.. _`py.test`: http://pytest.org .. _`py.path`: http://pylib.org/path.html .. 
_`py.code`: http://pylib.org/code.html --- a/CHANGELOG +++ b/CHANGELOG @@ -21,6 +21,10 @@ Changes between 1.X and 1.1.1 - new "pytestconfig" funcarg allows access to test config object +- (experimental) allow "py.test path::name1::name2::..." for pointing + to a test within a test collection directly. This might eventually + evolve as a full substitute to "-k" specifications. + - streamlined plugin loading: order is now as documented in customize.html: setuptools, ENV, commandline, conftest. also setuptools entry point names are turned to canonical namees ("pytest_*") From commits-noreply at bitbucket.org Sun Jan 10 23:58:58 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 10 Jan 2010 22:58:58 +0000 (UTC) Subject: [py-svn] py-trunk commit 085f5be42cb9: fix and test bug: dist-testing now works again without execnet/pylib installed remotely. fixes issue65. Message-ID: <20100110225858.D53587EF04@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263163943 -3600 # Node ID 085f5be42cb97cc7bf999aca6479aa1f2eb17d6f # Parent d63f118dd086fcfa8b867fd21ee2ec1ec82a6790 fix and test bug: dist-testing now works again without execnet/pylib installed remotely. fixes issue65. --- a/ISSUES.txt +++ b/ISSUES.txt @@ -42,15 +42,6 @@ test: testing/pytest/dist/test_dsession. Call gateway group termination with a small timeout if available. Should make dist-testing less likely to leave lost processes. -fix dist-testing: execnet needs to be rsynced over automatically ------------------------------------------------------------------- - -tags: bug 1.2 -bb: http://bitbucket.org/hpk42/py-trunk/issue/65/ - -execnet is not rsynced so fails if run in an ssh-situation. -write test and fix. - dist-testing: fix session hook / setup calling ----------------------------------------------------- tags: bug 1.2 --- a/py/impl/test/dist/txnode.py +++ b/py/impl/test/dist/txnode.py @@ -86,10 +86,12 @@ def install_slave(gateway, config): sys.path.insert(0, os.getcwd()) from py.impl.test.dist.mypickle import PickleChannel from py.impl.test.dist.txnode import SlaveNode + channel.send("basicimport") channel = PickleChannel(channel) slavenode = SlaveNode(channel) slavenode.run() """) + channel.receive() channel = PickleChannel(channel) basetemp = None if gateway.spec.popen: --- a/py/impl/test/dist/gwmanage.py +++ b/py/impl/test/dist/gwmanage.py @@ -35,14 +35,15 @@ class GatewayManager: gateways = [] for gateway in self.group: spec = gateway.spec - if not spec._samefilesystem(): - if spec not in seen: - def finished(): - if notify: - notify("rsyncrootready", spec, source) - rsync.add_target_host(gateway, finished=finished) - seen.add(spec) - gateways.append(gateway) + if spec.popen and not spec.chdir and not spec.python: + continue + if spec not in seen: + def finished(): + if notify: + notify("rsyncrootready", spec, source) + rsync.add_target_host(gateway, finished=finished) + seen.add(spec) + gateways.append(gateway) if seen: self.hook.pytest_gwmanage_rsyncstart( source=source, --- a/py/impl/test/dist/dsession.py +++ b/py/impl/test/dist/dsession.py @@ -1,9 +1,3 @@ -""" - - EXPERIMENTAL dsession session (for dist/non-dist unification) - -""" - import py from py.impl.test.session import Session from py.impl.test import outcome --- a/CHANGELOG +++ b/CHANGELOG @@ -62,6 +62,9 @@ Changes between 1.X and 1.1.1 - fix assert reinterpreation that sees a call containing "keyword=..." 
+- fix issue65: properly handle dist-testing if no + execnet/py lib installed remotely. + - skip some install-tests if no execnet is available - fix docs, fix internal bin/ script generation --- a/testing/pytest/dist/test_mypickle.py +++ b/testing/pytest/dist/test_mypickle.py @@ -239,20 +239,6 @@ class TestPickleChannelFunctional: error = channel._getremoteerror() assert isinstance(error, UnpickleError) - def test_popen_with_newchannel(self): - channel = self.gw.remote_exec(""" - from py.impl.test.dist.mypickle import PickleChannel - channel = PickleChannel(channel) - newchannel = channel.receive() - newchannel.send(42) - """) - channel = PickleChannel(channel) - newchannel = self.gw.newchannel() - channel.send(newchannel) - channel.waitclose() - res = newchannel.receive() - assert res == 42 - def test_popen_with_various_methods(self): channel = self.gw.remote_exec(""" from py.impl.test.dist.mypickle import PickleChannel --- a/bin-for-dist/test_install.py +++ b/bin-for-dist/test_install.py @@ -174,3 +174,52 @@ def test_cmdline_entrypoints(monkeypatch assert expected in points for script in unversioned_scripts: assert script in points + +def test_slave_popen_needs_no_pylib(testdir, venv): + venv.ensure() + #xxx execnet optimizes popen + #ch = venv.makegateway().remote_exec("import execnet") + #py.test.raises(ch.RemoteError, ch.waitclose) + python = venv._cmd("python") + p = testdir.makepyfile(""" + import py + def test_func(): + pass + """) + result = testdir.runpytest(p, '--rsyncdir=%s' % str(p), + '--dist=each', '--tx=popen//python=%s' % python) + result.stdout.fnmatch_lines([ + "*1 passed*" + ]) + +def test_slave_needs_no_execnet(testdir, specssh): + gw = execnet.makegateway(specssh) + ch = gw.remote_exec(""" + import os, subprocess + subprocess.call(["virtualenv", "--no-site-packages", "subdir"]) + channel.send(os.path.join(os.path.abspath("subdir"), 'bin', 'python')) + channel.send(os.path.join(os.path.abspath("subdir"))) + """) + try: + path = ch.receive() + chdir = ch.receive() + except ch.RemoteError: + e = sys.exc_info()[1] + py.test.skip("could not prepare ssh slave:%s" % str(e)) + gw.exit() + newspec = "%s//python=%s//chdir=%s" % (specssh, path, chdir) + gw = execnet.makegateway(newspec) + ch = gw.remote_exec("import execnet") + py.test.raises(ch.RemoteError, ch.waitclose) + gw.exit() + + p = testdir.makepyfile(""" + import py + def test_func(): + pass + """) + result = testdir.runpytest(p, '--rsyncdir=%s' % str(p), + '--dist=each', '--tx=%s' % newspec) + result.stdout.fnmatch_lines([ + "*1 passed*" + ]) --- a/py/impl/test/config.py +++ b/py/impl/test/config.py @@ -166,8 +166,8 @@ class Config(object): raise self.Error("path %r is not relative to %r" % (str(path), str(topdir))) # assumtion: pytest's fs-collector tree follows the filesystem tree - names = filter(None, path.relto(topdir).split(path.sep)) - names.extend(parts) + names = list(filter(None, path.relto(topdir).split(path.sep))) + names += parts try: return self._rootcol.getbynames(names) except ValueError: --- a/py/impl/test/dist/mypickle.py +++ b/py/impl/test/dist/mypickle.py @@ -138,12 +138,8 @@ class PickleChannel(object): self.RemoteError = channel.RemoteError def send(self, obj): - from execnet.gateway_base import Channel - if not isinstance(obj, Channel): - pickled_obj = self._ipickle.dumps(obj) - self._channel.send(pickled_obj) - else: - self._channel.send(obj) + pickled_obj = self._ipickle.dumps(obj) + self._channel.send(pickled_obj) def receive(self): pickled_obj = self._channel.receive() From commits-noreply 
at bitbucket.org Mon Jan 11 14:31:30 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 11 Jan 2010 13:31:30 +0000 (UTC) Subject: [py-svn] py-trunk commit b7acb4d619f9: refine rsyncing and internal dir/transferal handling: don't transfer roots in a popen- no-chdir situation and only use one py._pydir everywhere Message-ID: <20100111133130.CF3CE7EEE1@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263216650 -3600 # Node ID b7acb4d619f9e4713814df6d1a6fe0512d717b25 # Parent 085f5be42cb97cc7bf999aca6479aa1f2eb17d6f refine rsyncing and internal dir/transferal handling: don't transfer roots in a popen- no-chdir situation and only use one py._pydir everywhere --- a/py/impl/test/dist/gwmanage.py +++ b/py/impl/test/dist/gwmanage.py @@ -3,7 +3,7 @@ """ import py -import sys, os +import sys, os.path import execnet from execnet.gateway_base import RemoteError @@ -35,7 +35,12 @@ class GatewayManager: gateways = [] for gateway in self.group: spec = gateway.spec - if spec.popen and not spec.chdir and not spec.python: + if spec.popen and not spec.chdir: + # XXX this assumes that sources are python-packages + # and that adding the basedir does not hurt + gateway.remote_exec(""" + import sys ; sys.path.insert(0, %r) + """ % os.path.dirname(str(source))).waitclose() continue if spec not in seen: def finished(): --- a/py/impl/test/config.py +++ b/py/impl/test/config.py @@ -269,21 +269,17 @@ class Config(object): def getrsyncdirs(self): config = self - roots = config.option.rsyncdir + candidates = [py._pydir] + config.option.rsyncdir conftestroots = config.getconftest_pathlist("rsyncdirs") if conftestroots: - roots.extend(conftestroots) - pydirs = [x.realpath() for x in py._pydirs] - roots = [py.path.local(root) for root in roots] - for root in roots: + candidates.extend(conftestroots) + roots = [] + for root in candidates: + root = py.path.local(root).realpath() if not root.check(): raise config.Error("rsyncdir doesn't exist: %r" %(root,)) - if pydirs is not None and root.basename in ("py", "_py"): - try: - pydirs.remove(root) # otherwise it's a conflict - except ValueError: # we run as standalone py.test - pass - roots.extend(pydirs) + if root not in roots: + roots.append(root) return roots # --- a/py/__init__.py +++ b/py/__init__.py @@ -19,9 +19,7 @@ py.apipkg.initpkg(__name__, dict( # access to all posix errno's as classes error = '.impl.error:error', - _impldir = '.impl._metainfo:impldir', - _dir = '.impl._metainfo:pydir', - _pydirs = '.impl._metainfo:pydirs', + _pydir = '.impl._metainfo:pydir', version = 'py:__version__', # backward compatibility cmdline = { --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -112,7 +112,7 @@ class TestTraceback_f_g_h: def test_traceback_cut_excludepath(self, testdir): p = testdir.makepyfile("def f(): raise ValueError") excinfo = py.test.raises(ValueError, "p.pyimport().f()") - basedir = py._impldir + basedir = py._pydir newtraceback = excinfo.traceback.cut(excludepath=basedir) assert len(newtraceback) == 1 assert newtraceback[0].frame.code.path == p --- a/CHANGELOG +++ b/CHANGELOG @@ -50,6 +50,8 @@ Changes between 1.X and 1.1.1 - change: pytest doctest plugin is now enabled by default and has a new option --doctest-glob to set a pattern for file matches. 
+- change: remove internal py._* helper vars, only keep py._pydir + - robustify capturing to survive if custom pytest_runtest_setup code failed and prevented the capturing setup code from running. --- a/testing/pytest/test_outcome.py +++ b/testing/pytest/test_outcome.py @@ -66,6 +66,6 @@ def test_pytest_cmdline_main(testdir): assert 1 if __name__ == '__main__': py.test.cmdline.main([__file__]) - """ % (str(py._dir.dirpath()))) + """ % (str(py._pydir.dirpath()))) import subprocess subprocess.check_call([sys.executable, str(p)]) --- a/py/impl/test/collect.py +++ b/py/impl/test/collect.py @@ -257,7 +257,7 @@ class Collector(Node): path = self.fspath ntraceback = traceback.cut(path=self.fspath) if ntraceback == traceback: - ntraceback = ntraceback.cut(excludepath=py._dir) + ntraceback = ntraceback.cut(excludepath=py._pydir) traceback = ntraceback.filter() return traceback --- a/py/plugin/pytest_pytester.py +++ b/py/plugin/pytest_pytester.py @@ -318,7 +318,7 @@ class TmpTestdir: assert hasattr(py.cmdline, cmdlinename), cmdlinename source = ("import sys ; sys.path.insert(0, %r); " "import py ; py.cmdline.%s()" % - (str(py._dir.dirpath()), cmdlinename)) + (str(py._pydir.dirpath()), cmdlinename)) return (sys.executable, "-c", source,) def runpython(self, script): @@ -329,7 +329,7 @@ class TmpTestdir: def _getsysprepend(self): if not self.request.config.getvalue("toolsonpath"): - s = "import sys ; sys.path.insert(0, %r) ; " % str(py._dir.dirpath()) + s = "import sys ; sys.path.insert(0, %r) ; " % str(py._pydir.dirpath()) else: s = "" return s --- a/testing/plugin/test_pytest_default.py +++ b/testing/plugin/test_pytest_default.py @@ -59,7 +59,7 @@ class TestDistOptions: def test_getrsyncdirs(self, testdir): config = testdir.parseconfigure('--rsyncdir=' + str(testdir.tmpdir)) roots = config.getrsyncdirs() - assert len(roots) == 1 + len(py._pydirs) + assert len(roots) == 1 + 1 # pylib itself assert testdir.tmpdir in roots def test_getrsyncdirs_with_conftest(self, testdir): @@ -71,7 +71,7 @@ class TestDistOptions: """) config = testdir.parseconfigure(testdir.tmpdir, '--rsyncdir=y', '--rsyncdir=z') roots = config.getrsyncdirs() - assert len(roots) == 3 + len(py._pydirs) + assert len(roots) == 3 + 1 # pylib itself assert py.path.local('y') in roots assert py.path.local('z') in roots assert testdir.tmpdir.join('x') in roots --- a/py/impl/_metainfo.py +++ b/py/impl/_metainfo.py @@ -1,9 +1,2 @@ - import py - pydir = py.path.local(py.__file__).dirpath() -impldir = pydir.join("impl") - -# list of all directories beloging to py -assert impldir.relto(pydir) -pydirs = [pydir] --- a/testing/test_py_imports.py +++ b/testing/test_py_imports.py @@ -25,7 +25,7 @@ def test_virtual_module_identity(): assert local1 is local2 def test_importall(): - base = py._impldir + base = py._pydir.join("impl") nodirs = [ base.join('test', 'testing', 'data'), base.join('path', 'gateway',), --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -128,7 +128,7 @@ class RestWriter: class PluginOverview(RestWriter): def makerest(self, config): - plugindir = py._dir.join('plugin') + plugindir = py._pydir.join('plugin') for cat, specs in plugins: pluginlist = specs.split() self.h1(cat) --- a/py/plugin/pytest_restdoc.py +++ b/py/plugin/pytest_restdoc.py @@ -174,7 +174,7 @@ class ReSTSyntaxTest(py.test.collect.Ite 'to the py package') % (text,) relpath = '/'.join(text.split('/')[1:]) if check: - pkgroot = py._impldir + pkgroot = py._pydir abspath = pkgroot.join(relpath) assert pkgroot.join(relpath).check(), ( 'problem 
with linkrole :source:`%s`: ' --- a/testing/pytest/dist/test_gwmanage.py +++ b/testing/pytest/dist/test_gwmanage.py @@ -51,12 +51,19 @@ class TestGatewayManagerPopen: hm.makegateways() assert len(hm.group) == 2 for gw in hm.group: - gw.remote_exec = None + class pseudoexec: + args = [] + def __init__(self, *args): + self.args.extend(args) + def waitclose(self): + pass + gw.remote_exec = pseudoexec l = [] hm.rsync(source, notify=lambda *args: l.append(args)) assert not l hm.exit() assert not len(hm.group) + assert "sys.path.insert" in gw.remote_exec.args[0] def test_rsync_popen_with_path(self, hook, mysetup): source, dest = mysetup.source, mysetup.dest --- a/py/impl/test/pycollect.py +++ b/py/impl/test/pycollect.py @@ -246,7 +246,7 @@ class FunctionMixin(PyobjMixin): if ntraceback == traceback: ntraceback = ntraceback.cut(path=path) if ntraceback == traceback: - ntraceback = ntraceback.cut(excludepath=py._dir) + ntraceback = ntraceback.cut(excludepath=py._pydir) traceback = ntraceback.filter() return traceback From commits-noreply at bitbucket.org Mon Jan 11 14:33:28 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 11 Jan 2010 13:33:28 +0000 (UTC) Subject: [py-svn] py-trunk commit 0d2090ccd84c: fix rest syntax error (thanks fijal) Message-ID: <20100111133328.D21F67EEDE@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263216798 -3600 # Node ID 0d2090ccd84c7180d64e051c2c23a2f8ee00261d # Parent b7acb4d619f9e4713814df6d1a6fe0512d717b25 fix rest syntax error (thanks fijal) --- a/doc/path.txt +++ b/doc/path.txt @@ -40,7 +40,7 @@ a ``py.path.local`` object for us (which >>> foofile.read(1) 'b' -``py.path.svnurl` and ``py.path.svnwc`` +``py.path.svnurl`` and ``py.path.svnwc`` ---------------------------------------------- Two other ``py.path`` implementations that the py lib provides wrap the From commits-noreply at bitbucket.org Mon Jan 11 17:16:56 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 11 Jan 2010 16:16:56 +0000 (UTC) Subject: [py-svn] py-trunk commit 91cf219b3d46: fix sessionstart/sessionfinish handling at the slave side, set "session.nodeid" to id of the slave and make sure "final" teardown failures are reported nicely. fixes issue66. Message-ID: <20100111161656.6FE107EE75@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263226147 -3600 # Node ID 91cf219b3d4659119d7e7df3162c1de0e6332b69 # Parent 0d2090ccd84c7180d64e051c2c23a2f8ee00261d fix sessionstart/sessionfinish handling at the slave side, set "session.nodeid" to id of the slave and make sure "final" teardown failures are reported nicely. fixes issue66. --- a/ISSUES.txt +++ b/ISSUES.txt @@ -42,16 +42,6 @@ test: testing/pytest/dist/test_dsession. Call gateway group termination with a small timeout if available. Should make dist-testing less likely to leave lost processes. -dist-testing: fix session hook / setup calling ------------------------------------------------------ -tags: bug 1.2 - -Currently pytest_sessionstart and finish are called -on the master node and not on the slaves. Call -it on slaves and provide a session.nodeid which defaults -to None for the master and contains the gateway id -for slaves. 
- have --report=xfailed[-detail] report the actual tracebacks ------------------------------------------------------------------ tags: feature --- a/py/impl/test/dist/txnode.py +++ b/py/impl/test/dist/txnode.py @@ -3,6 +3,7 @@ """ import py from py.impl.test.dist.mypickle import PickleChannel +from py.impl.test import outcome class TXNode(object): """ Represents a Test Execution environment in the controlling process. @@ -55,12 +56,12 @@ class TXNode(object): elif eventname == "slavefinished": self._down = True self.notify("pytest_testnodedown", error=None, node=self) - elif eventname == "pytest_runtest_logreport": - rep = kwargs['report'] - rep.node = self - self.notify("pytest_runtest_logreport", report=rep) + elif eventname in ("pytest_runtest_logreport", + "pytest__teardown_final_logerror"): + kwargs['report'].node = self + self.notify(eventname, **kwargs) else: - self.notify(eventname, *args, **kwargs) + self.notify(eventname, **kwargs) except KeyboardInterrupt: # should not land in receiver-thread raise @@ -99,7 +100,7 @@ def install_slave(gateway, config): basetemp = py.path.local.make_numbered_dir(prefix="slave-", keep=0, rootdir=popenbase) basetemp = str(basetemp) - channel.send((config, basetemp)) + channel.send((config, basetemp, gateway.id)) return channel class SlaveNode(object): @@ -115,9 +116,12 @@ class SlaveNode(object): def pytest_runtest_logreport(self, report): self.sendevent("pytest_runtest_logreport", report=report) + def pytest__teardown_final_logerror(self, report): + self.sendevent("pytest__teardown_final_logerror", report=report) + def run(self): channel = self.channel - self.config, basetemp = channel.receive() + self.config, basetemp, self.nodeid = channel.receive() if basetemp: self.config.basetemp = py.path.local(basetemp) self.config.pluginmanager.do_configure(self.config) @@ -125,22 +129,27 @@ class SlaveNode(object): self.runner = self.config.pluginmanager.getplugin("pytest_runner") self.sendevent("slaveready") try: + self.config.hook.pytest_sessionstart(session=self) while 1: task = channel.receive() if task is None: - self.sendevent("slavefinished") break if isinstance(task, list): for item in task: self.run_single(item=item) else: self.run_single(item=task) + self.config.hook.pytest_sessionfinish( + session=self, + exitstatus=outcome.EXIT_OK) except KeyboardInterrupt: raise except: er = py.code.ExceptionInfo().getrepr(funcargs=True, showlocals=True) self.sendevent("pytest_internalerror", excrepr=er) raise + else: + self.sendevent("slavefinished") def run_single(self, item): call = self.runner.CallInfo(item._checkcollectable, when='setup') --- a/py/impl/test/dist/dsession.py +++ b/py/impl/test/dist/dsession.py @@ -127,6 +127,12 @@ class DSession(Session): elif eventname == "pytest_runtest_logreport": # might be some teardown report self.config.hook.pytest_runtest_logreport(**kwargs) + elif eventname == "pytest_internalerror": + self.config.hook.pytest_internalerror(**kwargs) + loopstate.exitstatus = outcome.EXIT_INTERNALERROR + elif eventname == "pytest__teardown_final_logerror": + self.config.hook.pytest__teardown_final_logerror(**kwargs) + loopstate.exitstatus = outcome.EXIT_TESTSFAILED if not self.node2pending: # finished if loopstate.testsfailed: --- a/py/plugin/pytest_runner.py +++ b/py/plugin/pytest_runner.py @@ -68,6 +68,8 @@ def pytest_runtest_teardown(item): def pytest__teardown_final(session): call = CallInfo(session.config._setupstate.teardown_all, when="teardown") if call.excinfo: + ntraceback = call.excinfo.traceback 
.cut(excludepath=py._pydir) + call.excinfo.traceback = ntraceback.filter() rep = TeardownErrorReport(call.excinfo) return rep --- a/py/impl/test/session.py +++ b/py/impl/test/session.py @@ -13,10 +13,7 @@ Item = py.test.collect.Item Collector = py.test.collect.Collector class Session(object): - """ - Session drives the collection and running of tests - and generates test events for reporters. - """ + nodeid = "" def __init__(self, config): self.config = config self.pluginmanager = config.pluginmanager # shortcut --- a/CHANGELOG +++ b/CHANGELOG @@ -64,6 +64,10 @@ Changes between 1.X and 1.1.1 - fix assert reinterpreation that sees a call containing "keyword=..." +- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish + hooks on slaves during dist-testing, report module/session teardown + hooks correctly. + - fix issue65: properly handle dist-testing if no execnet/py lib installed remotely. --- a/testing/pytest/dist/test_dsession.py +++ b/testing/pytest/dist/test_dsession.py @@ -440,19 +440,66 @@ def test_teardownfails_one_function(test "*1 passed*1 error*" ]) - at py.test.mark.xfail + at py.test.mark.xfail def test_terminate_on_hangingnode(testdir): p = testdir.makeconftest(""" def pytest__teardown_final(session): - if session.nodeid: # running on slave + if session.nodeid == "my": # running on slave import time - time.sleep(2) + time.sleep(3) """) - result = testdir.runpytest(p, '--dist=each', '--tx=popen') + result = testdir.runpytest(p, '--dist=each', '--tx=popen//id=my') assert result.duration < 2.0 result.stdout.fnmatch_lines([ - "*0 passed*", + "*killed*my*", ]) +def test_session_hooks(testdir): + testdir.makeconftest(""" + import sys + def pytest_sessionstart(session): + sys.pytestsessionhooks = session + def pytest_sessionfinish(session): + f = open(session.nodeid or "master", 'w') + f.write("xy") + f.close() + # let's fail on the slave + if session.nodeid: + raise ValueError(42) + """) + p = testdir.makepyfile(""" + import sys + def test_hello(): + assert hasattr(sys, 'pytestsessionhooks') + """) + result = testdir.runpytest(p, "--dist=each", "--tx=popen//id=my1") + result.stdout.fnmatch_lines([ + "*ValueError*", + "*1 passed*", + ]) + assert result.ret + d = result.parseoutcomes() + assert d['passed'] == 1 + assert testdir.tmpdir.join("my1").check() + assert testdir.tmpdir.join("master").check() + +def test_funcarg_teardown_failure(testdir): + p = testdir.makepyfile(""" + def pytest_funcarg__myarg(request): + def teardown(val): + raise ValueError(val) + return request.cached_setup(setup=lambda: 42, teardown=teardown, + scope="module") + def test_hello(myarg): + pass + """) + result = testdir.runpytest(p, "-n1") + assert result.ret + result.stdout.fnmatch_lines([ + "*ValueError*42*", + "*1 passed*1 error*", + ]) + + From commits-noreply at bitbucket.org Tue Jan 12 01:36:06 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 12 Jan 2010 00:36:06 +0000 (UTC) Subject: [py-svn] py-trunk commit 0b97b846458c: refine classname normalization for junit-xml Message-ID: <20100112003606.EA2C97EE88@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263256550 -3600 # Node ID 0b97b846458c967fb211b41ca4bcf65daa890f4f # Parent 91cf219b3d4659119d7e7df3162c1de0e6332b69 refine classname normalization for junit-xml --- a/py/plugin/pytest_logxml.py +++ b/py/plugin/pytest_logxml.py @@ -35,7 +35,7 @@ class LogXML(object): def _opentestcase(self, report): 
node = report.item d = {'time': self._durations.pop(report.item, "0")} - names = [x.replace(".py", "") for x in node.listnames()] + names = [x.replace(".py", "") for x in node.listnames() if x != "()"] d['classname'] = ".".join(names[:-1]) d['name'] = names[-1] attrs = ['%s="%s"' % item for item in sorted(d.items())] @@ -61,7 +61,7 @@ class LogXML(object): def _opentestcase_collectfailure(self, report): node = report.collector d = {'time': '???'} - names = [x.replace(".py", "") for x in node.listnames()] + names = [x.replace(".py", "") for x in node.listnames() if x != "()"] d['classname'] = ".".join(names[:-1]) d['name'] = names[-1] attrs = ['%s="%s"' % item for item in sorted(d.items())] --- a/testing/plugin/test_pytest_logxml.py +++ b/testing/plugin/test_pytest_logxml.py @@ -49,6 +49,21 @@ class TestPython: assert_attr(fnode, message="test setup failure") assert "ValueError" in fnode.toxml() + def test_classname_instance(self, testdir): + testdir.makepyfile(""" + class TestClass: + def test_method(self): + assert 0 + """) + result, dom = runandparse(testdir) + assert result.ret + node = dom.getElementsByTagName("testsuite")[0] + assert_attr(node, failures=1) + tnode = node.getElementsByTagName("testcase")[0] + assert_attr(tnode, + classname="test_classname_instance.test_classname_instance.TestClass", + name="test_method") + def test_internal_error(self, testdir): testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0") testdir.makepyfile("def test_function(): pass") From commits-noreply at bitbucket.org Tue Jan 12 16:10:40 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 12 Jan 2010 15:10:40 +0000 (UTC) Subject: [py-svn] py-trunk commit 49ee318008dc: test and fix looponfailing wrt to a bug introduced with the cmdline/session startup cleanup. Message-ID: <20100112151040.401FB7EE85@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263308928 -3600 # Node ID 49ee318008dc38c48074039434c358cc9345c002 # Parent 0b97b846458c967fb211b41ca4bcf65daa890f4f test and fix looponfailing wrt to a bug introduced with the cmdline/session startup cleanup. 
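For orientation, a simplified sketch of the cycle that looponfail ("-f") implements — this is not the LooponfailingSession code itself, just the idea behind it: run the whole test set once, then on every file change re-run the previously failing tests, and fall back to a full run once the failing set is green again:

    def loop_on_fail(run, wait_for_change):
        # 'run' takes a list of failing test ids ([] meaning "run everything")
        # and returns the ids that failed; 'wait_for_change' blocks until a
        # tracked source file changes on disk (both are assumed helpers here)
        failing = run([])
        while True:
            wait_for_change()
            if failing:
                failing = run(failing)   # re-run only the previously failing set
            if not failing:
                failing = run([])        # failing set passed: do a full run again
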
--- a/py/impl/test/looponfail/remote.py +++ b/py/impl/test/looponfail/remote.py @@ -22,9 +22,9 @@ class LooponfailingSession(Session): self.remotecontrol = RemoteControl(self.config) self.out = py.io.TerminalWriter() - def main(self, initialitems=None): + def main(self, initialitems): try: - self.loopstate = loopstate = LoopState(initialitems) + self.loopstate = loopstate = LoopState([]) self.remotecontrol.setup() while 1: self.loop_once(loopstate) --- a/testing/pytest/looponfail/test_remote.py +++ b/testing/pytest/looponfail/test_remote.py @@ -127,3 +127,25 @@ class TestLooponFailing: session.loop_once(loopstate) assert len(loopstate.colitems) == 1 + + + def test_looponfail_functional_fail_to_ok(self, testdir): + p = testdir.makepyfile(""" + def test_one(): + x = 0 + assert x == 1 + """) + child = testdir.spawn_pytest("-f %s" % p) + child.expect("def test_one") + child.expect("x == 1") + child.expect("1 failed") + child.expect("### LOOPONFAILING ####") + child.expect("waiting for changes") + p.write(py.code.Source(""" + def test_one(): + x = 1 + assert x == 1 + """)) + child.expect(".*1 passed.*") + child.kill(15) + From commits-noreply at bitbucket.org Tue Jan 12 16:16:24 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 12 Jan 2010 15:16:24 +0000 (UTC) Subject: [py-svn] py-trunk commit 549cc885f8d7: adding a second requirements file which uses execnet-1.0.2 Message-ID: <20100112151624.BFF327EE85@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263309307 -3600 # Node ID 549cc885f8d7ba1a42b6a32b915077c0d17eeaf1 # Parent 49ee318008dc38c48074039434c358cc9345c002 adding a second requirements file which uses execnet-1.0.2 --- /dev/null +++ b/testing/pip-reqs2.txt @@ -0,0 +1,4 @@ +docutils +pexpect +figleaf +hg+http://bitbucket.org/hpk42/execnet#egg=execnet From commits-noreply at bitbucket.org Tue Jan 12 18:42:16 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 12 Jan 2010 17:42:16 +0000 (UTC) Subject: [py-svn] py-trunk commit a2aa9172049b: remove the PickleChannel dependency for looponfail Message-ID: <20100112174216.38E1A7EE74@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User Ronny Pfannschmidt # Date 1263314106 -3600 # Node ID a2aa9172049b81b1278ce92fbff465900a7afecd # Parent 549cc885f8d7ba1a42b6a32b915077c0d17eeaf1 remove the PickleChannel dependency for looponfail --- a/py/impl/test/looponfail/remote.py +++ b/py/impl/test/looponfail/remote.py @@ -11,7 +11,6 @@ import py import sys import execnet from py.impl.test.session import Session -from py.impl.test.dist.mypickle import PickleChannel from py.impl.test.looponfail import util class LooponfailingSession(Session): @@ -63,19 +62,22 @@ class RemoteControl(object): raise ValueError("already have gateway %r" % self.gateway) self.trace("setting up slave session") self.gateway = self.initgateway() - channel = self.gateway.remote_exec(""" + self.channel = channel = self.gateway.remote_exec(""" import os - from py.impl.test.dist.mypickle import PickleChannel - from py.impl.test.looponfail.remote import slave_runsession + import py chdir = channel.receive() outchannel = channel.gateway.newchannel() channel.send(outchannel) - channel = PickleChannel(channel) os.chdir(chdir) # unpickling config uses cwd as topdir - config, fullwidth, hasmarkup = channel.receive() + config_state = 
channel.receive() + fullwidth, hasmarkup = channel.receive() + py.test.config.__setstate__(config_state) + import sys sys.stdout = sys.stderr = outchannel.makefile('w') - slave_runsession(channel, config, fullwidth, hasmarkup) + + from py.impl.test.looponfail.remote import slave_runsession + slave_runsession(channel, py.test.config, fullwidth, hasmarkup) """) channel.send(str(self.config.topdir)) remote_outchannel = channel.receive() @@ -83,8 +85,8 @@ class RemoteControl(object): out._file.write(s) out._file.flush() remote_outchannel.setcallback(write) - channel = self.channel = PickleChannel(channel) - channel.send((self.config, out.fullwidth, out.hasmarkup)) + channel.send(self.config.__getstate__()) + channel.send((out.fullwidth, out.hasmarkup)) self.trace("set up of slave session complete") def ensure_teardown(self): --- a/py/impl/test/config.py +++ b/py/impl/test/config.py @@ -114,7 +114,7 @@ class Config(object): for path in self.args: path = py.path.local(path) l.append(path.relto(self.topdir)) - return l, self.option + return l, vars(self.option) def __setstate__(self, repr): # we have to set py.test.config because loading @@ -126,7 +126,8 @@ class Config(object): self._rootcol = RootCollector(config=self) args, cmdlineopts = repr args = [str(self.topdir.join(x)) for x in args] - self.option = cmdlineopts + self.option = CmdOptions() + self.option.__dict__.update(cmdlineopts) self._preparse(args) self._setargs(args) From commits-noreply at bitbucket.org Tue Jan 12 21:57:51 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 12 Jan 2010 20:57:51 +0000 (UTC) Subject: [py-svn] py-trunk commit e725bce364b8: fix reqs2 to point to released execnet Message-ID: <20100112205751.D510D7EE7B@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263329846 -3600 # Node ID e725bce364b87e0f3bb79345999ac38418207993 # Parent d5ef2fedcec40fdff3f65ed090fc2d49649a2cb4 fix reqs2 to point to released execnet --- a/testing/pip-reqs2.txt +++ b/testing/pip-reqs2.txt @@ -1,4 +1,4 @@ docutils pexpect figleaf -hg+http://bitbucket.org/hpk42/execnet#egg=execnet +execnet From commits-noreply at bitbucket.org Tue Jan 12 21:57:49 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 12 Jan 2010 20:57:49 +0000 (UTC) Subject: [py-svn] py-trunk commit d5ef2fedcec4: introduce a new pytest_report_header(hook) hook to add additional test-run relevant information to the header of a test report. Message-ID: <20100112205749.C11DF7EE76@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263329005 -3600 # Node ID d5ef2fedcec40fdff3f65ed090fc2d49649a2cb4 # Parent a2aa9172049b81b1278ce92fbff465900a7afecd introduce a new pytest_report_header(hook) hook to add additional test-run relevant information to the header of a test report. --- a/ISSUES.txt +++ b/ISSUES.txt @@ -16,14 +16,6 @@ With 1.1.1 py.test fails at least on win is relative and compared against an absolute conftest.py path. Normalize. -allow plugins/conftests to show extra header information --------------------------------------------------------- -tags: feature 1.2 - -The test-report header should optionally show information -about the under-test package and versions/locations of -involved packages. 
- make node._checkcollectable more robust ------------------------------------------------- tags: bug 1.2 --- a/py/plugin/pytest_helpconfig.py +++ b/py/plugin/pytest_helpconfig.py @@ -10,9 +10,19 @@ def pytest_addoption(parser): group._addoption('-p', action="append", dest="plugins", default = [], metavar="name", help="early-load given plugin (multi-allowed).") + group.addoption('--traceconfig', + action="store_true", dest="traceconfig", default=False, + help="trace considerations of conftest.py files."), + group._addoption('--nomagic', + action="store_true", dest="nomagic", default=False, + help="don't reinterpret asserts, no traceback cutting. ") + group.addoption('--debug', + action="store_true", dest="debug", default=False, + help="generate and show internal debugging information.") group.addoption("--help-config", action="store_true", dest="helpconfig", help="show available conftest.py and ENV-variable names.") + def pytest_configure(__multicall__, config): if config.option.version: p = py.path.local(py.__file__).dirpath() @@ -65,6 +75,19 @@ conftest_options = ( ('rsyncdirs', 'to-be-rsynced directories for dist-testing'), ) +def pytest_report_header(config): + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append("using py lib: %s" % (py.path.local(py.__file__).dirpath())) + if config.option.traceconfig: + lines.append("active plugins:") + plugins = [] + items = config.pluginmanager._name2plugin.items() + for name, plugin in items: + lines.append(" %-20s: %s" %(name, repr(plugin))) + return lines + + # ===================================================== # validate plugin syntax and hooks # ===================================================== --- a/CHANGELOG +++ b/CHANGELOG @@ -21,6 +21,9 @@ Changes between 1.X and 1.1.1 - new "pytestconfig" funcarg allows access to test config object +- new "pytest_report_header" hook can return additional lines + to be displayed at the header of a test run. + - (experimental) allow "py.test path::name1::name2::..." for pointing to a test within a test collection directly. This might eventually evolve as a full substitute to "-k" specifications. 
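For illustration, a minimal conftest.py using the new hook could look like the sketch below; the printed lines are invented for the example. As the tests in this commit show, the hook may return either a single string or a list of lines, and nested lists are flattened by the terminal reporter before printing:

    # conftest.py -- illustrative sketch only, not part of the patch above
    import sys

    def pytest_report_header(config):
        # the returned lines end up at the top of the terminal test report
        return ["project under test: example",
                "python: %s" % sys.version.split()[0]]
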
--- a/testing/plugin/test_pytest_terminal.py +++ b/testing/plugin/test_pytest_terminal.py @@ -289,6 +289,22 @@ class TestTerminal: ]) result.stdout.fnmatch_lines(['*KEYBOARD INTERRUPT*']) + def test_pytest_report_header(self, testdir): + testdir.makeconftest(""" + def pytest_report_header(config): + return "hello: info" + """) + testdir.mkdir("a").join("conftest.py").write("""if 1: + def pytest_report_header(config): + return ["line1", "line2"] + """) + result = testdir.runpytest("a") + result.stdout.fnmatch_lines([ + "*hello: info*", + "line1", + "line2", + ]) + class TestCollectonly: def test_collectonly_basic(self, testdir, linecomp): --- a/py/plugin/hookspec.py +++ b/py/plugin/hookspec.py @@ -109,6 +109,9 @@ def pytest_sessionfinish(session, exitst # hooks for influencing reporting (invoked from pytest_terminal) # ------------------------------------------------------------------------- +def pytest_report_header(config): + """ return a string to be displayed as header info for terminal reporting.""" + def pytest_report_teststatus(report): """ return result-category, shortletter and verbose word for reporting.""" pytest_report_teststatus.firstresult = True --- a/py/plugin/pytest_terminal.py +++ b/py/plugin/pytest_terminal.py @@ -24,17 +24,6 @@ def pytest_addoption(parser): action="store_true", dest="fulltrace", default=False, help="don't cut any tracebacks (default is to cut).") - group = parser.getgroup("debugconfig") - group.addoption('--traceconfig', - action="store_true", dest="traceconfig", default=False, - help="trace considerations of conftest.py files."), - group._addoption('--nomagic', - action="store_true", dest="nomagic", default=False, - help="don't reinterpret asserts, no traceback cutting. ") - group.addoption('--debug', - action="store_true", dest="debug", default=False, - help="generate and show internal debugging information.") - def pytest_configure(config): if config.option.collectonly: @@ -260,19 +249,10 @@ class TerminalReporter: if self.config.option.verbose or self.config.option.debug or getattr(self.config.option, 'pastebin', None): msg += " -- " + str(sys.executable) self.write_line(msg) - - if self.config.option.debug or self.config.option.traceconfig: - self.write_line("using py lib: %s" % (py.path.local(py.__file__).dirpath())) - if self.config.option.traceconfig: - self.write_line("active plugins:") - plugins = [] - items = self.config.pluginmanager._name2plugin.items() - for name, plugin in items: - repr_plugin = repr(plugin) - fullwidth = getattr(self._tw, 'fullwidth', 65000) - if len(repr_plugin)+26 > fullwidth: - repr_plugin = repr_plugin[:(fullwidth-30)] + '...' 
- self.write_line(" %-20s: %s" %(name, repr_plugin)) + lines = self.config.hook.pytest_report_header(config=self.config) + lines.reverse() + for line in flatten(lines): + self.write_line(line) for i, testarg in enumerate(self.config.args): self.write_line("test object %d: %s" %(i+1, testarg)) @@ -463,3 +443,10 @@ def repr_pythonversion(v=None): except (TypeError, ValueError): return str(v) +def flatten(l): + for x in l: + if isinstance(x, (list, tuple)): + for y in flatten(x): + yield y + else: + yield x From commits-noreply at bitbucket.org Wed Jan 13 00:47:59 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 12 Jan 2010 23:47:59 +0000 (UTC) Subject: [py-svn] py-trunk commit 73b1a73c0ca8: remove figleaf which now lives "outside" Message-ID: <20100112234759.B521F7EE76@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263340040 -3600 # Node ID 73b1a73c0ca8f0c2d246daf5b33222c8f8bafad6 # Parent e725bce364b87e0f3bb79345999ac38418207993 remove figleaf which now lives "outside" --- a/doc/test/plugin/figleaf.txt +++ b/doc/test/plugin/figleaf.txt @@ -1,33 +1,17 @@ - pytest_figleaf plugin ===================== -report python test coverage using the 'figleaf' package. +add options to drive and report python test coverage using the 'figleaf' package. -.. contents:: - :local: +Install the `pytest-figleaf`_ plugin to use figleaf coverage testing:: -Not enabled by default (use "-p" or conftest settings to do so). + easy_install pytest-figleaf -command line options --------------------- +or:: + + pip install pytest-figleaf +This will make py.test have figleaf related options. -``--figleaf`` - trace python coverage with figleaf and write HTML for files below the current working dir -``--fig-data=dir`` - set tracing file, default: ".figleaf". -``--fig-html=dir`` - set html reporting dir, default "html". +.. _`pytest-figleaf`: http://bitbucket.org/hpk42/pytest-figleaf/ -Start improving this plugin in 30 seconds -========================================= - - -1. Download `pytest_figleaf.py`_ plugin source code -2. put it somewhere as ``pytest_figleaf.py`` into your import path -3. a subsequent ``py.test`` run will use your local version - -Checkout customize_, other plugins_ or `get in contact`_. - -.. include:: links.txt --- a/py/plugin/pytest_figleaf.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -report python test coverage using the 'figleaf' package. - -Not enabled by default (use "-p" or conftest settings to do so). 
-""" -import py -py.test.importorskip("figleaf") -import figleaf.annotate_html - -def pytest_addoption(parser): - group = parser.getgroup('figleaf options') - group.addoption('--figleaf', action='store_true', default=False, - dest = 'figleaf', - help=('trace python coverage with figleaf and write HTML ' - 'for files below the current working dir')) - group.addoption('--fig-data', action='store', default='.figleaf', - dest='figleafdata', metavar="dir", - help='set tracing file, default: ".figleaf".') - group.addoption('--fig-html', action='store', default='html', - dest='figleafhtml', metavar="dir", - help='set html reporting dir, default "html".') - -def pytest_configure(config): - if config.getvalue("figleaf"): - figleaf.start() - -def pytest_terminal_summary(terminalreporter): - config = terminalreporter.config - if not config.getvalue("figleaf"): - return - datafile = py.path.local(config.getvalue('figleafdata')) - tw = terminalreporter._tw - tw.sep('-', 'figleaf') - tw.line('Writing figleaf data to %s' % (datafile)) - figleaf.stop() - figleaf.write_coverage(str(datafile)) - coverage = get_coverage(datafile, config) - reportdir = py.path.local(config.getvalue('figleafhtml')) - tw.line('Writing figleaf html to file://%s' % (reportdir)) - figleaf.annotate_html.prepare_reportdir(str(reportdir)) - exclude = [] - figleaf.annotate_html.report_as_html(coverage, - str(reportdir), exclude, {}) - -def get_coverage(datafile, config): - # basepath = config.topdir - basepath = py.path.local() - data = figleaf.read_coverage(str(datafile)) - d = {} - coverage = figleaf.combine_coverage(d, data) - for path in coverage.keys(): - if not py.path.local(path).relto(basepath): - del coverage[path] - return coverage --- a/doc/test/plugin/terminal.txt +++ b/doc/test/plugin/terminal.txt @@ -23,12 +23,6 @@ command line options traceback verboseness (long/short/no). ``--fulltrace`` don't cut any tracebacks (default is to cut). -``--traceconfig`` - trace considerations of conftest.py files. -``--nomagic`` - don't reinterpret asserts, no traceback cutting. -``--debug`` - generate and show internal debugging information. Start improving this plugin in 30 seconds ========================================= --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -1,5 +1,5 @@ .. _`pytest_logxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_logxml.py -.. _`terminal`: terminal.html +.. _`helpconfig`: helpconfig.html .. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_recwarn.py .. _`unittest`: unittest.html .. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_monkeypatch.py @@ -17,10 +17,9 @@ .. _`restdoc`: restdoc.html .. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_pastebin.py .. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_tmpdir.py -.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_figleaf.py +.. _`terminal`: terminal.html .. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_hooklog.py .. _`logxml`: logxml.html -.. _`helpconfig`: helpconfig.html .. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_skipping.py .. _`checkout the py.test development version`: ../../install.html#checkout .. 
_`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_helpconfig.py --- a/doc/test/plugin/hookspec.txt +++ b/doc/test/plugin/hookspec.txt @@ -115,6 +115,9 @@ hook specification sourcecode # hooks for influencing reporting (invoked from pytest_terminal) # ------------------------------------------------------------------------- + def pytest_report_header(config): + """ return a string to be displayed as header info for terminal reporting.""" + def pytest_report_teststatus(report): """ return result-category, shortletter and verbose word for reporting.""" pytest_report_teststatus.firstresult = True --- a/doc/test/plugin/index.txt +++ b/doc/test/plugin/index.txt @@ -8,9 +8,9 @@ mark_ generic mechanism for marking pyth pdb_ interactive debugging with the Python Debugger. -figleaf_ report python test coverage using the 'figleaf' package. +figleaf_ (external) for testing with Titus' figleaf coverage module -coverage_ (3rd) for testing with Ned's coverage module +coverage_ (external) for testing with Ned's coverage module monkeypatch_ safely patch object attributes, dicts and environment variables. @@ -24,9 +24,9 @@ tmpdir_ provide temporary directories to testing domains =============== -oejskit_ (3rd) run javascript tests in real life browsers +oejskit_ (external) run javascript tests in real life browsers -django_ (3rd) for testing django applications +django_ (external) for testing django applications genscript_ generate standalone test script to be distributed along with an application. @@ -38,7 +38,7 @@ pastebin_ submit failure or test session logxml_ logging of test results in JUnit-XML format, for use with Hudson -xmlresult_ (3rd) for generating xml reports and CruiseControl integration +xmlresult_ (external) for generating xml reports and CruiseControl integration resultlog_ non-xml machine-readable logging of test results. --- a/testing/plugin/test_pytest_figleaf.py +++ /dev/null @@ -1,17 +0,0 @@ -import py - -def test_functional(testdir): - py.test.importorskip("figleaf") - testdir.makepyfile(""" - def f(): - x = 42 - def test_whatever(): - pass - """) - result = testdir.runpytest('--figleaf') - assert result.ret == 0 - assert result.stdout.fnmatch_lines([ - '*figleaf html*' - ]) - #print result.stdout.str() - --- a/doc/test/plugin/helpconfig.txt +++ b/doc/test/plugin/helpconfig.txt @@ -17,6 +17,12 @@ command line options display py lib version and import information. ``-p name`` early-load given plugin (multi-allowed). +``--traceconfig`` + trace considerations of conftest.py files. +``--nomagic`` + don't reinterpret asserts, no traceback cutting. +``--debug`` + generate and show internal debugging information. ``--help-config`` show available conftest.py and ENV-variable names. 
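This commit moves figleaf coverage support out of the core. As a rough sketch of the shape such an externally maintained plugin takes (the option name and messages below are invented for illustration; this is not the pytest-figleaf code): it adds its options in pytest_addoption, reacts to them in pytest_configure, and reports through pytest_terminal_summary:

    def pytest_addoption(parser):
        group = parser.getgroup('example options')
        group.addoption('--example-report', action='store_true', default=False,
                        dest='examplereport',
                        help='hypothetical flag: write an extra summary section.')

    def pytest_configure(config):
        if config.getvalue('examplereport'):
            # start whatever the plugin needs (tracing, coverage, logging, ...)
            pass

    def pytest_terminal_summary(terminalreporter):
        config = terminalreporter.config
        if not config.getvalue('examplereport'):
            return
        tw = terminalreporter._tw
        tw.sep('-', 'example summary')
        tw.line('extra reporting would go here')
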
--- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -22,6 +22,7 @@ plugins = [ externals = { 'oejskit': "run javascript tests in real life browsers", + 'figleaf': "for testing with Titus' figleaf coverage module", 'django': "for testing django applications", 'coverage': "for testing with Ned's coverage module ", 'xmlresult': "for generating xml reports " @@ -137,7 +138,7 @@ class PluginOverview(RestWriter): docpath = self.target.dirpath(name).new(ext=".txt") if oneliner is not None: htmlpath = docpath.new(ext='.html') - self.para("%s_ (3rd) %s" %(name, oneliner)) + self.para("%s_ (external) %s" %(name, oneliner)) self.add_internal_link(name, htmlpath) else: doc = PluginDoc(docpath) From commits-noreply at bitbucket.org Wed Jan 13 16:01:59 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 13 Jan 2010 15:01:59 +0000 (UTC) Subject: [py-svn] py-trunk commit 3fee6fd40486: remove dist-testing and looponfail code from core. there remain some (pytest_runner particularly) tests that test both plain and dist modes which cannot be easily dis-entangled. food for thought. Message-ID: <20100113150159.BBE417EE7F@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263394833 -3600 # Node ID 3fee6fd40486fd5b234a5fc6bf61ef1aa03c1c6d # Parent 73b1a73c0ca8f0c2d246daf5b33222c8f8bafad6 remove dist-testing and looponfail code from core. there remain some (pytest_runner particularly) tests that test both plain and dist modes which cannot be easily dis-entangled. food for thought. --- a/doc/test/plugin/resultlog.txt +++ b/doc/test/plugin/resultlog.txt @@ -1,8 +1,7 @@ - -pytest_resultlog plugin -======================= non-xml machine-readable logging of test results. +================================================= + .. contents:: :local: --- a/testing/pytest/looponfail/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- a/doc/test/index.txt +++ b/doc/test/index.txt @@ -13,8 +13,6 @@ funcargs_: powerful parametrized test fu `plugins`_: list of available plugins with usage examples and feature details. 
-`distributed testing`_: ad-hoc run tests on multiple CPUs and platforms - customize_: configuration, customization, extensions changelog_: history of changes covering last releases --- a/testing/pytest/test_config.py +++ b/testing/pytest/test_config.py @@ -180,21 +180,6 @@ class TestConfigApi_getinitialnodes: for col in col.listchain(): assert col.config is config -class TestOptionEffects: - def test_boxed_option_default(self, testdir): - tmpdir = testdir.tmpdir.ensure("subdir", dir=1) - config = testdir.reparseconfig() - config.initsession() - assert not config.option.boxed - py.test.importorskip("execnet") - config = testdir.reparseconfig(['-d', tmpdir]) - config.initsession() - assert not config.option.boxed - - def test_is_not_boxed_by_default(self, testdir): - config = testdir.reparseconfig([testdir.tmpdir]) - assert not config.option.boxed - class TestConfig_gettopdir: def test_gettopdir(self, testdir): from py.impl.test.config import gettopdir --- a/py/impl/test/looponfail/util.py +++ /dev/null @@ -1,53 +0,0 @@ -import py - -class StatRecorder: - def __init__(self, rootdirlist): - self.rootdirlist = rootdirlist - self.statcache = {} - self.check() # snapshot state - - def fil(self, p): - return p.ext in ('.py', '.txt', '.c', '.h') - def rec(self, p): - return p.check(dotfile=0) - - def waitonchange(self, checkinterval=1.0): - while 1: - changed = self.check() - if changed: - return - py.std.time.sleep(checkinterval) - - def check(self, removepycfiles=True): - changed = False - statcache = self.statcache - newstat = {} - for rootdir in self.rootdirlist: - for path in rootdir.visit(self.fil, self.rec): - oldstat = statcache.get(path, None) - if oldstat is not None: - del statcache[path] - try: - newstat[path] = curstat = path.stat() - except py.error.ENOENT: - if oldstat: - del statcache[path] - changed = True - else: - if oldstat: - if oldstat.mtime != curstat.mtime or \ - oldstat.size != curstat.size: - changed = True - py.builtin.print_("# MODIFIED", path) - if removepycfiles and path.ext == ".py": - pycfile = path + "c" - if pycfile.check(): - pycfile.remove() - - else: - changed = True - if statcache: - changed = True - self.statcache = newstat - return changed - --- a/doc/test/plugin/helpconfig.txt +++ b/doc/test/plugin/helpconfig.txt @@ -1,8 +1,7 @@ - -pytest_helpconfig plugin -======================== provide version info, conftest/environment config names. +======================================================== + .. contents:: :local: --- a/doc/test/plugin/terminal.txt +++ b/doc/test/plugin/terminal.txt @@ -1,8 +1,7 @@ - -pytest_terminal plugin -====================== Implements terminal reporting of the full testing process. +========================================================== + .. contents:: :local: --- a/doc/test/plugin/capture.txt +++ b/doc/test/plugin/capture.txt @@ -1,8 +1,7 @@ - -pytest_capture plugin -===================== configurable per-test stdout/stderr capturing mechanisms. +========================================================= + .. contents:: :local: --- a/doc/test/plugin/hooklog.txt +++ b/doc/test/plugin/hooklog.txt @@ -1,8 +1,7 @@ - -pytest_hooklog plugin -===================== log invocations of extension hooks to a file. +============================================= + .. 
contents:: :local: --- a/testing/test_py_imports.py +++ b/testing/test_py_imports.py @@ -30,7 +30,6 @@ def test_importall(): base.join('test', 'testing', 'data'), base.join('path', 'gateway',), base.join('code', 'oldmagic.py'), - base.join('execnet', 'script'), base.join('compat', 'testing'), ] if sys.version_info >= (3,0): @@ -41,11 +40,6 @@ def test_importall(): def recurse(p): return p.check(dotfile=0) and p.basename != "attic" - try: - import execnet - except ImportError: - execnet = None - for p in base.visit('*.py', recurse): if p.basename == '__init__.py': continue @@ -57,10 +51,6 @@ def test_importall(): else: relpath = relpath.replace(base.sep, '.') modpath = 'py.impl.%s' % relpath - if modpath.startswith("py.impl.test.dist") or \ - modpath.startswith("py.impl.test.looponfail"): - if not execnet: - continue check_import(modpath) def check_import(modpath): --- a/doc/test/plugin/pdb.txt +++ b/doc/test/plugin/pdb.txt @@ -1,8 +1,7 @@ - -pytest_pdb plugin -================= interactive debugging with the Python Debugger. +=============================================== + .. contents:: :local: --- a/py/plugin/pytest_default.py +++ b/py/plugin/pytest_default.py @@ -3,16 +3,6 @@ import sys import py -try: - import execnet - if not py.path.local(py.__file__).check(): - raise ImportError("") -except ImportError: - execnet = None -else: - if not hasattr(execnet, 'Group'): - execnet = None - def pytest_pyfunc_call(__multicall__, pyfuncitem): if not __multicall__.execute(): testfunction = pyfuncitem.obj @@ -63,10 +53,6 @@ def pytest_addoption(parser): "space separated keywords. precede a keyword with '-' to negate. " "Terminate the expression with ':' to treat a match as a signal " "to run all subsequent tests. ") - if execnet: - group._addoption('-f', '--looponfail', - action="store_true", dest="looponfail", default=False, - help="run tests, re-run failing test set until all pass.") group = parser.getgroup("collect", "collection") group.addoption('--collectonly', @@ -82,60 +68,15 @@ def pytest_addoption(parser): "test process debugging and configuration") group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", help="base temporary directory for this test run.") - if execnet: - add_dist_options(parser) - else: - parser.hints.append( - "'execnet>=1.0.0b4' required for --looponfailing / distributed testing." - ) - -def add_dist_options(parser): - # see http://pytest.org/help/dist") - group = parser.getgroup("dist", "distributed testing") - group._addoption('--dist', metavar="distmode", - action="store", choices=['load', 'each', 'no'], - type="choice", dest="dist", default="no", - help=("set mode for distributing tests to exec environments.\n\n" - "each: send each test to each available environment.\n\n" - "load: send each test to available environment.\n\n" - "(default) no: run tests inprocess, don't distribute.")) - group._addoption('--tx', dest="tx", action="append", default=[], metavar="xspec", - help=("add a test execution environment. some examples: " - "--tx popen//python=python2.5 --tx socket=192.168.1.102:8888 " - "--tx ssh=user at codespeak.net//chdir=testcache")) - group._addoption('-d', - action="store_true", dest="distload", default=False, - help="load-balance tests. 
shortcut for '--dist=load'") - group._addoption('-n', dest="numprocesses", metavar="numprocesses", - action="store", type="int", - help="shortcut for '--dist=load --tx=NUM*popen'") - group.addoption('--rsyncdir', action="append", default=[], metavar="dir1", - help="add directory for rsyncing to remote tx nodes.") def pytest_configure(config): - fixoptions(config) setsession(config) -def fixoptions(config): - if execnet: - if config.option.numprocesses: - config.option.dist = "load" - config.option.tx = ['popen'] * int(config.option.numprocesses) - if config.option.distload: - config.option.dist = "load" - def setsession(config): val = config.getvalue if val("collectonly"): from py.impl.test.session import Session config.setsessionclass(Session) - elif execnet: - if val("looponfail"): - from py.impl.test.looponfail.remote import LooponfailingSession - config.setsessionclass(LooponfailingSession) - elif val("dist") != "no": - from py.impl.test.dist.dsession import DSession - config.setsessionclass(DSession) # pycollect related hooks and code, should move to pytest_pycollect.py --- a/py/plugin/pytest_pdb.py +++ b/py/plugin/pytest_pdb.py @@ -4,10 +4,6 @@ interactive debugging with the Python De import py import pdb, sys, linecache from py.impl.test.outcome import Skipped -try: - import execnet -except ImportError: - execnet = None def pytest_addoption(parser): group = parser.getgroup("general") @@ -15,16 +11,9 @@ def pytest_addoption(parser): action="store_true", dest="usepdb", default=False, help="start the interactive Python debugger on errors.") - -def pytest_configure(__multicall__, config): - if config.option.usepdb: - if execnet: - __multicall__.execute() - if config.getvalue("looponfail"): - raise config.Error("--pdb incompatible with --looponfail.") - if config.option.dist != "no": - raise config.Error("--pdb incompatible with distributing tests.") - config.pluginmanager.register(PdbInvoke()) +def pytest_configure(config): + if config.getvalue("usepdb"): + config.pluginmanager.register(PdbInvoke(), 'pdb') class PdbInvoke: def pytest_runtest_makereport(self, item, call): --- a/py/impl/test/pluginmanager.py +++ b/py/impl/test/pluginmanager.py @@ -61,6 +61,18 @@ class PluginManager(object): def getplugins(self): return list(self.registry) + def skipifmissing(self, name): + if not self.hasplugin(name): + py.test.skip("plugin %r is missing" % name) + + def hasplugin(self, name): + try: + self.getplugin(name) + except KeyError: + return False + else: + return True + def getplugin(self, name): try: return self._name2plugin[name] --- a/py/impl/test/dist/gwmanage.py +++ /dev/null @@ -1,99 +0,0 @@ -""" - instantiating, managing and rsyncing to test hosts -""" - -import py -import sys, os.path -import execnet -from execnet.gateway_base import RemoteError - -class GatewayManager: - RemoteError = RemoteError - def __init__(self, specs, hook, defaultchdir="pyexecnetcache"): - self.specs = [] - self.hook = hook - self.group = execnet.Group() - for spec in specs: - if not isinstance(spec, execnet.XSpec): - spec = execnet.XSpec(spec) - if not spec.chdir and not spec.popen: - spec.chdir = defaultchdir - self.specs.append(spec) - - def makegateways(self): - assert not list(self.group) - for spec in self.specs: - gw = self.group.makegateway(spec) - self.hook.pytest_gwmanage_newgateway( - gateway=gw, platinfo=gw._rinfo()) - - def rsync(self, source, notify=None, verbose=False, ignores=None): - """ perform rsync to all remote hosts. 
- """ - rsync = HostRSync(source, verbose=verbose, ignores=ignores) - seen = py.builtin.set() - gateways = [] - for gateway in self.group: - spec = gateway.spec - if spec.popen and not spec.chdir: - # XXX this assumes that sources are python-packages - # and that adding the basedir does not hurt - gateway.remote_exec(""" - import sys ; sys.path.insert(0, %r) - """ % os.path.dirname(str(source))).waitclose() - continue - if spec not in seen: - def finished(): - if notify: - notify("rsyncrootready", spec, source) - rsync.add_target_host(gateway, finished=finished) - seen.add(spec) - gateways.append(gateway) - if seen: - self.hook.pytest_gwmanage_rsyncstart( - source=source, - gateways=gateways, - ) - rsync.send() - self.hook.pytest_gwmanage_rsyncfinish( - source=source, - gateways=gateways, - ) - - def exit(self): - self.group.terminate() - -class HostRSync(execnet.RSync): - """ RSyncer that filters out common files - """ - def __init__(self, sourcedir, *args, **kwargs): - self._synced = {} - ignores= None - if 'ignores' in kwargs: - ignores = kwargs.pop('ignores') - self._ignores = ignores or [] - super(HostRSync, self).__init__(sourcedir=sourcedir, **kwargs) - - def filter(self, path): - path = py.path.local(path) - if not path.ext in ('.pyc', '.pyo'): - if not path.basename.endswith('~'): - if path.check(dotfile=0): - for x in self._ignores: - if path == x: - break - else: - return True - - def add_target_host(self, gateway, finished=None): - remotepath = os.path.basename(self._sourcedir) - super(HostRSync, self).add_target(gateway, remotepath, - finishedcallback=finished, - delete=True,) - - def _report_send_file(self, gateway, modified_rel_path): - if self._verbose: - path = os.path.basename(self._sourcedir) + "/" + modified_rel_path - remotepath = gateway.spec.chdir - py.builtin.print_('%s:%s <= %s' % - (gateway.spec, remotepath, path)) --- a/py/impl/test/config.py +++ b/py/impl/test/config.py @@ -198,8 +198,10 @@ class Config(object): modpath = py.path.local(mod.__file__).dirpath() l = [] for relroot in relroots: - relroot = relroot.replace("/", py.path.local.sep) - l.append(modpath.join(relroot, abs=True)) + if not isinstance(relroot, py.path.local): + relroot = relroot.replace("/", py.path.local.sep) + relroot = modpath.join(relroot, abs=True) + l.append(relroot) return l def addoptions(self, groupname, *specs): @@ -253,46 +255,10 @@ class Config(object): self.trace("instantiated session %r" % session) return session - def getxspecs(self): - xspeclist = [] - for xspec in self.getvalue("tx"): - i = xspec.find("*") - try: - num = int(xspec[:i]) - except ValueError: - xspeclist.append(xspec) - else: - xspeclist.extend([xspec[i+1:]] * num) - if not xspeclist: - raise self.Error("MISSING test execution (tx) nodes: please specify --tx") - import execnet - return [execnet.XSpec(x) for x in xspeclist] - - def getrsyncdirs(self): - config = self - candidates = [py._pydir] + config.option.rsyncdir - conftestroots = config.getconftest_pathlist("rsyncdirs") - if conftestroots: - candidates.extend(conftestroots) - roots = [] - for root in candidates: - root = py.path.local(root).realpath() - if not root.check(): - raise config.Error("rsyncdir doesn't exist: %r" %(root,)) - if root not in roots: - roots.append(root) - return roots - # # helpers # -def checkmarshal(name, value): - try: - py.std.marshal.dumps(value) - except ValueError: - raise ValueError("%s=%r is not marshallable" %(name, value)) - def gettopdir(args): """ return the top directory for the given paths. 
if the common base dir resides in a python package --- a/testing/pytest/dist/acceptance_test.py +++ /dev/null @@ -1,119 +0,0 @@ -import py - -class TestDistribution: - def test_manytests_to_one_popen(self, testdir): - p1 = testdir.makepyfile(""" - import py - def test_fail0(): - assert 0 - def test_fail1(): - raise ValueError() - def test_ok(): - pass - def test_skip(): - py.test.skip("hello") - """, - ) - result = testdir.runpytest(p1, '-d', '--tx=popen', '--tx=popen') - result.stdout.fnmatch_lines([ - "*0*popen*Python*", - "*1*popen*Python*", - "*2 failed, 1 passed, 1 skipped*", - ]) - assert result.ret == 1 - - def test_dist_conftest_specified(self, testdir): - p1 = testdir.makepyfile(""" - import py - def test_fail0(): - assert 0 - def test_fail1(): - raise ValueError() - def test_ok(): - pass - def test_skip(): - py.test.skip("hello") - """, - ) - testdir.makeconftest(""" - option_tx = 'popen popen popen'.split() - """) - result = testdir.runpytest(p1, '-d') - result.stdout.fnmatch_lines([ - "*0*popen*Python*", - "*1*popen*Python*", - "*2*popen*Python*", - "*2 failed, 1 passed, 1 skipped*", - ]) - assert result.ret == 1 - - def test_dist_tests_with_crash(self, testdir): - if not hasattr(py.std.os, 'kill'): - py.test.skip("no os.kill") - - p1 = testdir.makepyfile(""" - import py - def test_fail0(): - assert 0 - def test_fail1(): - raise ValueError() - def test_ok(): - pass - def test_skip(): - py.test.skip("hello") - def test_crash(): - import time - import os - time.sleep(0.5) - os.kill(os.getpid(), 15) - """ - ) - result = testdir.runpytest(p1, '-d', '--tx=3*popen') - result.stdout.fnmatch_lines([ - "*popen*Python*", - "*popen*Python*", - "*popen*Python*", - "*node down*", - "*3 failed, 1 passed, 1 skipped*" - ]) - assert result.ret == 1 - - def test_distribution_rsyncdirs_example(self, testdir): - source = testdir.mkdir("source") - dest = testdir.mkdir("dest") - subdir = source.mkdir("example_pkg") - subdir.ensure("__init__.py") - p = subdir.join("test_one.py") - p.write("def test_5(): assert not __file__.startswith(%r)" % str(p)) - result = testdir.runpytest("-d", "--rsyncdir=%(subdir)s" % locals(), - "--tx=popen//chdir=%(dest)s" % locals(), p) - assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*0* *popen*platform*", - #"RSyncStart: [G1]", - #"RSyncFinished: [G1]", - "*1 passed*" - ]) - assert dest.join(subdir.basename).check(dir=1) - - def test_dist_each(self, testdir): - interpreters = [] - for name in ("python2.4", "python2.5"): - interp = py.path.local.sysfind(name) - if interp is None: - py.test.skip("%s not found" % name) - interpreters.append(interp) - - testdir.makepyfile(__init__="", test_one=""" - import sys - def test_hello(): - print("%s...%s" % sys.version_info[:2]) - assert 0 - """) - args = ["--dist=each"] - args += ["--tx", "popen//python=%s" % interpreters[0]] - args += ["--tx", "popen//python=%s" % interpreters[1]] - result = testdir.runpytest(*args) - s = result.stdout.str() - assert "2.4" in s - assert "2.5" in s --- a/testing/pytest/dist/test_gwmanage.py +++ /dev/null @@ -1,127 +0,0 @@ -""" - tests for - - gateway management - - manage rsyncing of hosts - -""" - -import py -import os -from py.impl.test.dist.gwmanage import GatewayManager, HostRSync -from py.impl.test.pluginmanager import HookRelay, Registry -from py.plugin import hookspec -import execnet - -def pytest_funcarg__hookrecorder(request): - _pytest = request.getfuncargvalue('_pytest') - hook = request.getfuncargvalue('hook') - return _pytest.gethookrecorder(hook) - -def 
pytest_funcarg__hook(request): - return HookRelay(hookspec, Registry()) - -class TestGatewayManagerPopen: - def test_popen_no_default_chdir(self, hook): - gm = GatewayManager(["popen"], hook) - assert gm.specs[0].chdir is None - - def test_default_chdir(self, hook): - l = ["ssh=noco", "socket=xyz"] - for spec in GatewayManager(l, hook).specs: - assert spec.chdir == "pyexecnetcache" - for spec in GatewayManager(l, hook, defaultchdir="abc").specs: - assert spec.chdir == "abc" - - def test_popen_makegateway_events(self, hook, hookrecorder, _pytest): - hm = GatewayManager(["popen"] * 2, hook) - hm.makegateways() - call = hookrecorder.popcall("pytest_gwmanage_newgateway") - assert call.gateway.spec == execnet.XSpec("popen") - assert call.gateway.id == "gw0" - assert call.platinfo.executable == call.gateway._rinfo().executable - call = hookrecorder.popcall("pytest_gwmanage_newgateway") - assert call.gateway.id == "gw1" - assert len(hm.group) == 2 - hm.exit() - assert not len(hm.group) - - def test_popens_rsync(self, hook, mysetup): - source = mysetup.source - hm = GatewayManager(["popen"] * 2, hook) - hm.makegateways() - assert len(hm.group) == 2 - for gw in hm.group: - class pseudoexec: - args = [] - def __init__(self, *args): - self.args.extend(args) - def waitclose(self): - pass - gw.remote_exec = pseudoexec - l = [] - hm.rsync(source, notify=lambda *args: l.append(args)) - assert not l - hm.exit() - assert not len(hm.group) - assert "sys.path.insert" in gw.remote_exec.args[0] - - def test_rsync_popen_with_path(self, hook, mysetup): - source, dest = mysetup.source, mysetup.dest - hm = GatewayManager(["popen//chdir=%s" %dest] * 1, hook) - hm.makegateways() - source.ensure("dir1", "dir2", "hello") - l = [] - hm.rsync(source, notify=lambda *args: l.append(args)) - assert len(l) == 1 - assert l[0] == ("rsyncrootready", hm.group['gw0'].spec, source) - hm.exit() - dest = dest.join(source.basename) - assert dest.join("dir1").check() - assert dest.join("dir1", "dir2").check() - assert dest.join("dir1", "dir2", 'hello').check() - - def test_rsync_same_popen_twice(self, hook, mysetup, hookrecorder): - source, dest = mysetup.source, mysetup.dest - hm = GatewayManager(["popen//chdir=%s" %dest] * 2, hook) - hm.makegateways() - source.ensure("dir1", "dir2", "hello") - hm.rsync(source) - call = hookrecorder.popcall("pytest_gwmanage_rsyncstart") - assert call.source == source - assert len(call.gateways) == 1 - assert call.gateways[0] in hm.group - call = hookrecorder.popcall("pytest_gwmanage_rsyncfinish") - -class pytest_funcarg__mysetup: - def __init__(self, request): - tmp = request.getfuncargvalue('tmpdir') - self.source = tmp.mkdir("source") - self.dest = tmp.mkdir("dest") - -class TestHRSync: - def test_hrsync_filter(self, mysetup): - source, dest = mysetup.source, mysetup.dest - source.ensure("dir", "file.txt") - source.ensure(".svn", "entries") - source.ensure(".somedotfile", "moreentries") - source.ensure("somedir", "editfile~") - syncer = HostRSync(source) - l = list(source.visit(rec=syncer.filter, - fil=syncer.filter)) - assert len(l) == 3 - basenames = [x.basename for x in l] - assert 'dir' in basenames - assert 'file.txt' in basenames - assert 'somedir' in basenames - - def test_hrsync_one_host(self, mysetup): - source, dest = mysetup.source, mysetup.dest - gw = execnet.makegateway("popen//chdir=%s" % dest) - finished = [] - rsync = HostRSync(source) - rsync.add_target_host(gw, finished=lambda: finished.append(1)) - source.join("hello.py").write("world") - rsync.send() - gw.exit() - assert 
dest.join(source.basename, "hello.py").check() - assert len(finished) == 1 --- a/py/impl/test/looponfail/remote.py +++ /dev/null @@ -1,165 +0,0 @@ -""" - LooponfailingSession and Helpers. - - NOTE that one really has to avoid loading and depending on - application modules within the controlling process - (the one that starts repeatedly test processes) - otherwise changes to source code can crash - the controlling process which should never happen. -""" -import py -import sys -import execnet -from py.impl.test.session import Session -from py.impl.test.looponfail import util - -class LooponfailingSession(Session): - def __init__(self, config): - super(LooponfailingSession, self).__init__(config=config) - self.rootdirs = [self.config.topdir] # xxx dist_rsync_roots? - self.statrecorder = util.StatRecorder(self.rootdirs) - self.remotecontrol = RemoteControl(self.config) - self.out = py.io.TerminalWriter() - - def main(self, initialitems): - try: - self.loopstate = loopstate = LoopState([]) - self.remotecontrol.setup() - while 1: - self.loop_once(loopstate) - if not loopstate.colitems and loopstate.wasfailing: - continue # the last failures passed, let's rerun all - self.statrecorder.waitonchange(checkinterval=2.0) - except KeyboardInterrupt: - print - - def loop_once(self, loopstate): - colitems = loopstate.colitems - loopstate.wasfailing = colitems and len(colitems) - loopstate.colitems = self.remotecontrol.runsession(colitems or ()) - self.remotecontrol.setup() - -class LoopState: - def __init__(self, colitems=None): - self.colitems = colitems - -class RemoteControl(object): - def __init__(self, config): - self.config = config - - def trace(self, *args): - if self.config.option.debug: - msg = " ".join([str(x) for x in args]) - py.builtin.print_("RemoteControl:", msg) - - def initgateway(self): - return execnet.makegateway("popen") - - def setup(self, out=None): - if out is None: - out = py.io.TerminalWriter() - if hasattr(self, 'gateway'): - raise ValueError("already have gateway %r" % self.gateway) - self.trace("setting up slave session") - self.gateway = self.initgateway() - self.channel = channel = self.gateway.remote_exec(""" - import os - import py - chdir = channel.receive() - outchannel = channel.gateway.newchannel() - channel.send(outchannel) - os.chdir(chdir) # unpickling config uses cwd as topdir - config_state = channel.receive() - fullwidth, hasmarkup = channel.receive() - py.test.config.__setstate__(config_state) - - import sys - sys.stdout = sys.stderr = outchannel.makefile('w') - - from py.impl.test.looponfail.remote import slave_runsession - slave_runsession(channel, py.test.config, fullwidth, hasmarkup) - """) - channel.send(str(self.config.topdir)) - remote_outchannel = channel.receive() - def write(s): - out._file.write(s) - out._file.flush() - remote_outchannel.setcallback(write) - channel.send(self.config.__getstate__()) - channel.send((out.fullwidth, out.hasmarkup)) - self.trace("set up of slave session complete") - - def ensure_teardown(self): - if hasattr(self, 'channel'): - if not self.channel.isclosed(): - self.trace("closing", self.channel) - self.channel.close() - del self.channel - if hasattr(self, 'gateway'): - self.trace("exiting", self.gateway) - self.gateway.exit() - del self.gateway - - def runsession(self, colitems=()): - try: - self.trace("sending", colitems) - trails = colitems - self.channel.send(trails) - try: - return self.channel.receive() - except self.channel.RemoteError: - e = sys.exc_info()[1] - self.trace("ERROR", e) - raise - finally: - 
self.ensure_teardown() - -def slave_runsession(channel, config, fullwidth, hasmarkup): - """ we run this on the other side. """ - if config.option.debug: - def DEBUG(*args): - print(" ".join(map(str, args))) - else: - def DEBUG(*args): pass - - DEBUG("SLAVE: received configuration, using topdir:", config.topdir) - #config.option.session = None - config.option.looponfail = False - config.option.usepdb = False - trails = channel.receive() - config.pluginmanager.do_configure(config) - DEBUG("SLAVE: initsession()") - session = config.initsession() - # XXX configure the reporter object's terminal writer more directly - # XXX and write a test for this remote-terminal setting logic - config.pytest_terminal_hasmarkup = hasmarkup - config.pytest_terminal_fullwidth = fullwidth - if trails: - colitems = [] - for trail in trails: - try: - colitem = config._rootcol.fromtrail(trail) - except ValueError: - #XXX send info for "test disappeared" or so - continue - colitems.append(colitem) - else: - colitems = config.getinitialnodes() - session.shouldclose = channel.isclosed - - class Failures(list): - def pytest_runtest_logreport(self, report): - if report.failed: - self.append(report) - pytest_collectreport = pytest_runtest_logreport - - failreports = Failures() - session.pluginmanager.register(failreports) - - DEBUG("SLAVE: starting session.main()") - session.main(colitems) - session.config.hook.pytest_looponfailinfo( - failreports=list(failreports), - rootdirs=[config.topdir]) - rootcol = session.config._rootcol - channel.send([rootcol.totrail(rep.getnode()) for rep in failreports]) --- a/testing/plugin/test_pytest_terminal.py +++ b/testing/plugin/test_pytest_terminal.py @@ -3,10 +3,6 @@ terminal reporting of the full testing p """ import py import sys -try: - import execnet -except ImportError: - execnet = None # =============================================================================== # plugin tests @@ -45,12 +41,13 @@ def pytest_generate_tests(metafunc): id="verbose", funcargs={'option': Option(verbose=True)} ) - nodist = getattr(metafunc.function, 'nodist', False) - if execnet and not nodist: - metafunc.addcall( - id="verbose-dist", - funcargs={'option': Option(dist='each', verbose=True)} - ) + if metafunc.config.pluginmanager.hasplugin("xdist"): + nodist = getattr(metafunc.function, 'nodist', False) + if not nodist: + metafunc.addcall( + id="verbose-dist", + funcargs={'option': Option(dist='each', verbose=True)} + ) class TestTerminal: def test_pass_skip_fail(self, testdir, option): @@ -545,7 +542,7 @@ class TestTerminalFunctional: "y* = 'xxxxxx*" ]) - def test_verbose_reporting(self, testdir): + def test_verbose_reporting(self, testdir, pytestconfig): p1 = testdir.makepyfile(""" import py def test_fail(): @@ -568,12 +565,12 @@ class TestTerminalFunctional: "*test_verbose_reporting.py:10: test_gen*FAIL*", ]) assert result.ret == 1 - if execnet: - result = testdir.runpytest(p1, '-v', '-n 1') - result.stdout.fnmatch_lines([ - "*FAIL*test_verbose_reporting.py:2: test_fail*", - ]) - assert result.ret == 1 + pytestconfig.pluginmanager.skipifmissing("xdist") + result = testdir.runpytest(p1, '-v', '-n 1') + result.stdout.fnmatch_lines([ + "*FAIL*test_verbose_reporting.py:2: test_fail*", + ]) + assert result.ret == 1 def test_getreportopt(): --- a/doc/test/plugin/figleaf.txt +++ b/doc/test/plugin/figleaf.txt @@ -1,17 +1,41 @@ -pytest_figleaf plugin -===================== -add options to drive and report python test coverage using the 'figleaf' package. 
+report test coverage using the 'figleaf' package. +================================================= -Install the `pytest-figleaf`_ plugin to use figleaf coverage testing:: - easy_install pytest-figleaf +.. contents:: + :local: -or:: - - pip install pytest-figleaf +Usage +--------------- -This will make py.test have figleaf related options. +After a pip- or easy_install-mediated installation of ``pytest-figleaf`` you can type:: -.. _`pytest-figleaf`: http://bitbucket.org/hpk42/pytest-figleaf/ + py.test --figleaf [...] +to enable figleaf coverage in your test run. A default ".figleaf" data file +and "html" directory will be created. You can use ``--fig-data`` +and ``--fig-html`` to modify the paths. + +command line options +-------------------- + + +``--figleaf`` + trace python coverage with figleaf and write HTML for files below the current working dir +``--fig-data=dir`` + set the tracing data file, default: ".figleaf". +``--fig-html=dir`` + set the html reporting dir, default: "html". + +Start improving this plugin in 30 seconds +========================================= + + +1. Download the `pytest_figleaf.py`_ plugin source code +2. Put it somewhere on your import path as ``pytest_figleaf.py`` +3. A subsequent ``py.test`` run will use your local version + +Check out customize_, other plugins_ or `get in contact`_. + +.. include:: links.txt --- a/doc/test/plugin/genscript.txt +++ b/doc/test/plugin/genscript.txt @@ -1,8 +1,7 @@ - -pytest_genscript plugin -======================= generate standalone test script to be distributed along with an application. +============================================================================ + .. contents:: :local: --- a/doc/test/plugin/recwarn.txt +++ b/doc/test/plugin/recwarn.txt @@ -1,8 +1,7 @@ - -pytest_recwarn plugin -===================== helpers for asserting deprecation and other warnings. +===================================================== + .. contents:: :local: --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -1,5 +1,5 @@ .. _`pytest_logxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_logxml.py -.. _`helpconfig`: helpconfig.html +.. _`terminal`: terminal.html .. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_recwarn.py .. _`unittest`: unittest.html .. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_monkeypatch.py @@ -15,11 +15,14 @@ .. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_nose.py .. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html +.. _`xdist`: xdist.html .. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_pastebin.py .. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_tmpdir.py -.. _`terminal`: terminal.html +.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_figleaf.py .. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_hooklog.py .. _`logxml`: logxml.html +.. _`helpconfig`: helpconfig.html +.. _`plugin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/plugin.py .. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_skipping.py .. _`checkout the py.test development version`: ../../install.html#checkout ..
_`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_helpconfig.py --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,9 @@ Changes between 1.X and 1.1.1 ===================================== +- moved dist/looponfailing from py.test core into a new + separately released pytest-xdist plugin. + - new junitxml plugin: --xml=path will generate a junit style xml file which is parseable e.g. by the hudson continous integration server. --- a/doc/test/plugin/nose.txt +++ b/doc/test/plugin/nose.txt @@ -1,8 +1,7 @@ - -pytest_nose plugin -================== nose-compatibility plugin: allow to run nose test suites natively. +================================================================== + .. contents:: :local: --- a/testing/pytest/test_pickling.py +++ /dev/null @@ -1,198 +0,0 @@ -import py -import pickle - -def setglobals(request): - oldconfig = py.test.config - print("setting py.test.config to None") - py.test.config = None - def resetglobals(): - py.builtin.print_("setting py.test.config to", oldconfig) - py.test.config = oldconfig - request.addfinalizer(resetglobals) - -def pytest_funcarg__testdir(request): - setglobals(request) - return request.getfuncargvalue("testdir") - -class ImmutablePickleTransport: - def __init__(self, request): - from py.impl.test.dist.mypickle import ImmutablePickler - self.p1 = ImmutablePickler(uneven=0) - self.p2 = ImmutablePickler(uneven=1) - setglobals(request) - - def p1_to_p2(self, obj): - return self.p2.loads(self.p1.dumps(obj)) - - def p2_to_p1(self, obj): - return self.p1.loads(self.p2.dumps(obj)) - - def unifyconfig(self, config): - p2config = self.p1_to_p2(config) - p2config._initafterpickle(config.topdir) - return p2config - -pytest_funcarg__pickletransport = ImmutablePickleTransport - -class TestImmutablePickling: - def test_pickle_config(self, testdir, pickletransport): - config1 = testdir.parseconfig() - assert config1.topdir == testdir.tmpdir - testdir.chdir() - p2config = pickletransport.p1_to_p2(config1) - assert p2config.topdir.realpath() == config1.topdir.realpath() - config_back = pickletransport.p2_to_p1(p2config) - assert config_back is config1 - - def test_pickle_modcol(self, testdir, pickletransport): - modcol1 = testdir.getmodulecol("def test_one(): pass") - modcol2a = pickletransport.p1_to_p2(modcol1) - modcol2b = pickletransport.p1_to_p2(modcol1) - assert modcol2a is modcol2b - - modcol1_back = pickletransport.p2_to_p1(modcol2a) - assert modcol1_back - - def test_pickle_func(self, testdir, pickletransport): - modcol1 = testdir.getmodulecol("def test_one(): pass") - item = modcol1.collect_by_name("test_one") - testdir.chdir() - item2a = pickletransport.p1_to_p2(item) - assert item is not item2a # of course - assert item2a.name == item.name - modback = pickletransport.p2_to_p1(item2a.parent) - assert modback is modcol1 - - -class TestConfigPickling: - def test_config_getstate_setstate(self, testdir): - from py.impl.test.config import Config - testdir.makepyfile(__init__="", conftest="x=1; y=2") - hello = testdir.makepyfile(hello="") - tmp = testdir.tmpdir - testdir.chdir() - config1 = testdir.parseconfig(hello) - config2 = Config() - config2.__setstate__(config1.__getstate__()) - assert config2.topdir == py.path.local() - config2_relpaths = [py.path.local(x).relto(config2.topdir) - for x in config2.args] - config1_relpaths = [py.path.local(x).relto(config1.topdir) - for x in config1.args] - - assert config2_relpaths == config1_relpaths - for name, value in config1.option.__dict__.items(): - assert 
getattr(config2.option, name) == value - assert config2.getvalue("x") == 1 - - def test_config_pickling_customoption(self, testdir): - testdir.makeconftest(""" - def pytest_addoption(parser): - group = parser.getgroup("testing group") - group.addoption('-G', '--glong', action="store", default=42, - type="int", dest="gdest", help="g value.") - """) - config = testdir.parseconfig("-G", "11") - assert config.option.gdest == 11 - repr = config.__getstate__() - - config = testdir.Config() - py.test.raises(AttributeError, "config.option.gdest") - - config2 = testdir.Config() - config2.__setstate__(repr) - assert config2.option.gdest == 11 - - def test_config_pickling_and_conftest_deprecated(self, testdir): - tmp = testdir.tmpdir.ensure("w1", "w2", dir=1) - tmp.ensure("__init__.py") - tmp.join("conftest.py").write(py.code.Source(""" - def pytest_addoption(parser): - group = parser.getgroup("testing group") - group.addoption('-G', '--glong', action="store", default=42, - type="int", dest="gdest", help="g value.") - """)) - config = testdir.parseconfig(tmp, "-G", "11") - assert config.option.gdest == 11 - repr = config.__getstate__() - - config = testdir.Config() - py.test.raises(AttributeError, "config.option.gdest") - - config2 = testdir.Config() - config2.__setstate__(repr) - assert config2.option.gdest == 11 - - option = config2.addoptions("testing group", - config2.Option('-G', '--glong', action="store", default=42, - type="int", dest="gdest", help="g value.")) - assert option.gdest == 11 - - def test_config_picklability(self, testdir): - config = testdir.parseconfig() - s = pickle.dumps(config) - newconfig = pickle.loads(s) - assert hasattr(newconfig, "topdir") - assert newconfig.topdir == py.path.local() - - def test_collector_implicit_config_pickling(self, testdir): - tmpdir = testdir.tmpdir - testdir.chdir() - testdir.makepyfile(hello="def test_x(): pass") - config = testdir.parseconfig(tmpdir) - col = config.getnode(config.topdir) - io = py.io.BytesIO() - pickler = pickle.Pickler(io) - pickler.dump(col) - io.seek(0) - unpickler = pickle.Unpickler(io) - col2 = unpickler.load() - assert col2.name == col.name - assert col2.listnames() == col.listnames() - - def test_config_and_collector_pickling(self, testdir): - tmpdir = testdir.tmpdir - dir1 = tmpdir.ensure("somedir", dir=1) - config = testdir.parseconfig() - col = config.getnode(config.topdir) - col1 = col.join(dir1.basename) - assert col1.parent is col - io = py.io.BytesIO() - pickler = pickle.Pickler(io) - pickler.dump(col) - pickler.dump(col1) - pickler.dump(col) - io.seek(0) - unpickler = pickle.Unpickler(io) - topdir = tmpdir.ensure("newtopdir", dir=1) - topdir.ensure("somedir", dir=1) - old = topdir.chdir() - try: - newcol = unpickler.load() - newcol2 = unpickler.load() - newcol3 = unpickler.load() - assert newcol2.config is newcol.config - assert newcol2.parent == newcol - assert newcol2.config.topdir.realpath() == topdir.realpath() - assert newcol.fspath.realpath() == topdir.realpath() - assert newcol2.fspath.basename == dir1.basename - assert newcol2.fspath.relto(newcol2.config.topdir) - finally: - old.chdir() - -def test_config__setstate__wired_correctly_in_childprocess(testdir): - execnet = py.test.importorskip("execnet") - from py.impl.test.dist.mypickle import PickleChannel - gw = execnet.makegateway() - channel = gw.remote_exec(""" - import py - from py.impl.test.dist.mypickle import PickleChannel - channel = PickleChannel(channel) - config = channel.receive() - assert py.test.config == config - """) - channel = 
PickleChannel(channel) - config = testdir.parseconfig() - channel.send(config) - channel.waitclose() # this will potentially raise - gw.exit() --- a/py/plugin/pytest_runner.py +++ b/py/plugin/pytest_runner.py @@ -8,12 +8,6 @@ from py.impl.test.outcome import Skipped # # pytest plugin hooks -def pytest_addoption(parser): - group = parser.getgroup("general") - group.addoption('--boxed', - action="store_true", dest="boxed", default=False, - help="box each test run in a separate process (unix)") - # XXX move to pytest_sessionstart and fix py.test owns tests def pytest_configure(config): config._setupstate = SetupState() @@ -36,12 +30,7 @@ def pytest_make_collect_report(collector return CollectReport(collector, result, excinfo) def pytest_runtest_protocol(item): - if item.config.getvalue("boxed"): - reports = forked_run_report(item) - for rep in reports: - item.ihook.pytest_runtest_logreport(report=rep) - else: - runtestprotocol(item) + runtestprotocol(item) return True def runtestprotocol(item, log=True): @@ -116,38 +105,6 @@ class CallInfo: status = "result: %r" % (self.result,) return "" % (self.when, status) -def forked_run_report(item): - # for now, we run setup/teardown in the subprocess - # XXX optionally allow sharing of setup/teardown - EXITSTATUS_TESTEXIT = 4 - from py.impl.test.dist.mypickle import ImmutablePickler - ipickle = ImmutablePickler(uneven=0) - ipickle.selfmemoize(item.config) - # XXX workaround the issue that 2.6 cannot pickle - # instances of classes defined in global conftest.py files - ipickle.selfmemoize(item) - def runforked(): - try: - reports = runtestprotocol(item, log=False) - except KeyboardInterrupt: - py.std.os._exit(EXITSTATUS_TESTEXIT) - return ipickle.dumps(reports) - - ff = py.process.ForkedFunc(runforked) - result = ff.waitfinish() - if result.retval is not None: - return ipickle.loads(result.retval) - else: - if result.exitstatus == EXITSTATUS_TESTEXIT: - py.test.exit("forked test item %s raised Exit" %(item,)) - return [report_process_crash(item, result)] - -def report_process_crash(item, result): - path, lineno = item._getfslineno() - info = "%s:%s: running the test CRASHED with signal %d" %( - path, lineno, result.signal) - return ItemTestReport(item, excinfo=info, when="???") - class BaseReport(object): def __repr__(self): l = ["%s=%s" %(key, value) --- a/testing/plugin/test_pytest_genscript.py +++ b/testing/plugin/test_pytest_genscript.py @@ -25,13 +25,14 @@ def test_gen(testdir, anypython, standal "*imported from*mypytest" ]) -def test_rundist(testdir, standalone): +def test_rundist(testdir, pytestconfig, standalone): + pytestconfig.pluginmanager.skipifmissing("xdist") testdir.makepyfile(""" def test_one(): pass """) result = standalone.run(sys.executable, testdir, '-n', '3') - assert result.ret == 2 - result.stderr.fnmatch_lines([ - "*no such option*" + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*1 passed*", ]) --- a/testing/plugin/test_pytest_runner.py +++ b/testing/plugin/test_pytest_runner.py @@ -221,7 +221,9 @@ class TestExecutionForked(BaseFunctional pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')") def getrunner(self): - return runner.forked_run_report + # XXX re-arrange this test to live in pytest-xdist + xplugin = py.test.importorskip("xdist.plugin") + return xplugin.forked_run_report def test_suicide(self, testdir): reports = testdir.runitem(""" @@ -262,19 +264,6 @@ class TestCollectionReports: assert not rep.passed assert rep.skipped - at py.test.mark.skipif("not hasattr(os, 'fork')") -def 
test_functional_boxed(testdir): - p1 = testdir.makepyfile(""" - import os - def test_function(): - os.kill(os.getpid(), 15) - """) - result = testdir.runpytest(p1, "--boxed") - assert result.stdout.fnmatch_lines([ - "*CRASHED*", - "*1 failed*" - ]) - def test_callinfo(): ci = runner.CallInfo(lambda: 0, '123') assert ci.when == "123" --- a/py/impl/test/dist/nodemanage.py +++ /dev/null @@ -1,81 +0,0 @@ -import py -import sys, os -from py.impl.test.dist.txnode import TXNode -from py.impl.test.dist.gwmanage import GatewayManager - - -class NodeManager(object): - def __init__(self, config, specs=None): - self.config = config - if specs is None: - specs = self.config.getxspecs() - self.roots = self.config.getrsyncdirs() - self.gwmanager = GatewayManager(specs, config.hook) - self.nodes = [] - self._nodesready = py.std.threading.Event() - - def trace(self, msg): - self.config.hook.pytest_trace(category="nodemanage", msg=msg) - - def config_getignores(self): - return self.config.getconftest_pathlist("rsyncignore") - - def rsync_roots(self): - """ make sure that all remote gateways - have the same set of roots in their - current directory. - """ - self.makegateways() - options = { - 'ignores': self.config_getignores(), - 'verbose': self.config.option.verbose, - } - if self.roots: - # send each rsync root - for root in self.roots: - self.gwmanager.rsync(root, **options) - else: - XXX # do we want to care for situations without explicit rsyncdirs? - # we transfer our topdir as the root - self.gwmanager.rsync(self.config.topdir, **options) - # and cd into it - self.gwmanager.multi_chdir(self.config.topdir.basename, inplacelocal=False) - - def makegateways(self): - # we change to the topdir sot that - # PopenGateways will have their cwd - # such that unpickling configs will - # pick it up as the right topdir - # (for other gateways this chdir is irrelevant) - self.trace("making gateways") - old = self.config.topdir.chdir() - try: - self.gwmanager.makegateways() - finally: - old.chdir() - - def setup_nodes(self, putevent): - self.rsync_roots() - self.trace("setting up nodes") - for gateway in self.gwmanager.group: - node = TXNode(gateway, self.config, putevent, slaveready=self._slaveready) - gateway.node = node # to keep node alive - self.trace("started node %r" % node) - - def _slaveready(self, node): - #assert node.gateway == node.gateway - #assert node.gateway.node == node - self.nodes.append(node) - self.trace("%s slave node ready %r" % (node.gateway.id, node)) - if len(self.nodes) == len(list(self.gwmanager.group)): - self._nodesready.set() - - def wait_nodesready(self, timeout=None): - self._nodesready.wait(timeout) - if not self._nodesready.isSet(): - raise IOError("nodes did not get ready for %r secs" % timeout) - - def teardown_nodes(self): - # XXX do teardown nodes? - self.gwmanager.exit() - --- a/doc/test/plugin/restdoc.txt +++ b/doc/test/plugin/restdoc.txt @@ -1,8 +1,7 @@ - -pytest_restdoc plugin -===================== perform ReST syntax, local and remote reference tests on .rst/.txt files. +========================================================================= + .. contents:: :local: --- a/py/impl/test/looponfail/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- a/doc/test/plugin/unittest.txt +++ b/doc/test/plugin/unittest.txt @@ -1,8 +1,7 @@ - -pytest_unittest plugin -====================== automatically discover and run traditional "unittest.py" style tests. +===================================================================== + .. 
contents:: :local: --- a/py/impl/test/dist/txnode.py +++ /dev/null @@ -1,164 +0,0 @@ -""" - Manage setup, running and local representation of remote nodes/processes. -""" -import py -from py.impl.test.dist.mypickle import PickleChannel -from py.impl.test import outcome - -class TXNode(object): - """ Represents a Test Execution environment in the controlling process. - - sets up a slave node through an execnet gateway - - manages sending of test-items and receival of results and events - - creates events when the remote side crashes - """ - ENDMARK = -1 - - def __init__(self, gateway, config, putevent, slaveready=None): - self.config = config - self.putevent = putevent - self.gateway = gateway - self.channel = install_slave(gateway, config) - self._sendslaveready = slaveready - self.channel.setcallback(self.callback, endmarker=self.ENDMARK) - self._down = False - - def __repr__(self): - id = self.gateway.id - status = self._down and 'true' or 'false' - return "" %(id, status) - - def notify(self, eventname, *args, **kwargs): - assert not args - self.putevent((eventname, args, kwargs)) - - def callback(self, eventcall): - """ this gets called for each object we receive from - the other side and if the channel closes. - - Note that channel callbacks run in the receiver - thread of execnet gateways - we need to - avoid raising exceptions or doing heavy work. - """ - try: - if eventcall == self.ENDMARK: - err = self.channel._getremoteerror() - if not self._down: - if not err or isinstance(err, EOFError): - err = "Not properly terminated" - self.notify("pytest_testnodedown", node=self, error=err) - self._down = True - return - eventname, args, kwargs = eventcall - if eventname == "slaveready": - if self._sendslaveready: - self._sendslaveready(self) - self.notify("pytest_testnodeready", node=self) - elif eventname == "slavefinished": - self._down = True - self.notify("pytest_testnodedown", error=None, node=self) - elif eventname in ("pytest_runtest_logreport", - "pytest__teardown_final_logerror"): - kwargs['report'].node = self - self.notify(eventname, **kwargs) - else: - self.notify(eventname, **kwargs) - except KeyboardInterrupt: - # should not land in receiver-thread - raise - except: - excinfo = py.code.ExceptionInfo() - py.builtin.print_("!" 
* 20, excinfo) - self.config.pluginmanager.notify_exception(excinfo) - - def send(self, item): - assert item is not None - self.channel.send(item) - - def sendlist(self, itemlist): - self.channel.send(itemlist) - - def shutdown(self): - self.channel.send(None) - -# setting up slave code -def install_slave(gateway, config): - channel = gateway.remote_exec(source=""" - import os, sys - sys.path.insert(0, os.getcwd()) - from py.impl.test.dist.mypickle import PickleChannel - from py.impl.test.dist.txnode import SlaveNode - channel.send("basicimport") - channel = PickleChannel(channel) - slavenode = SlaveNode(channel) - slavenode.run() - """) - channel.receive() - channel = PickleChannel(channel) - basetemp = None - if gateway.spec.popen: - popenbase = config.ensuretemp("popen") - basetemp = py.path.local.make_numbered_dir(prefix="slave-", - keep=0, rootdir=popenbase) - basetemp = str(basetemp) - channel.send((config, basetemp, gateway.id)) - return channel - -class SlaveNode(object): - def __init__(self, channel): - self.channel = channel - - def __repr__(self): - return "<%s channel=%s>" %(self.__class__.__name__, self.channel) - - def sendevent(self, eventname, *args, **kwargs): - self.channel.send((eventname, args, kwargs)) - - def pytest_runtest_logreport(self, report): - self.sendevent("pytest_runtest_logreport", report=report) - - def pytest__teardown_final_logerror(self, report): - self.sendevent("pytest__teardown_final_logerror", report=report) - - def run(self): - channel = self.channel - self.config, basetemp, self.nodeid = channel.receive() - if basetemp: - self.config.basetemp = py.path.local(basetemp) - self.config.pluginmanager.do_configure(self.config) - self.config.pluginmanager.register(self) - self.runner = self.config.pluginmanager.getplugin("pytest_runner") - self.sendevent("slaveready") - try: - self.config.hook.pytest_sessionstart(session=self) - while 1: - task = channel.receive() - if task is None: - break - if isinstance(task, list): - for item in task: - self.run_single(item=item) - else: - self.run_single(item=task) - self.config.hook.pytest_sessionfinish( - session=self, - exitstatus=outcome.EXIT_OK) - except KeyboardInterrupt: - raise - except: - er = py.code.ExceptionInfo().getrepr(funcargs=True, showlocals=True) - self.sendevent("pytest_internalerror", excrepr=er) - raise - else: - self.sendevent("slavefinished") - - def run_single(self, item): - call = self.runner.CallInfo(item._checkcollectable, when='setup') - if call.excinfo: - # likely it is not collectable here because of - # platform/import-dependency induced skips - # we fake a setup-error report with the obtained exception - # and do not care about capturing or non-runner hooks - rep = self.runner.pytest_runtest_makereport(item=item, call=call) - self.pytest_runtest_logreport(rep) - return - item.config.hook.pytest_runtest_protocol(item=item) --- a/doc/test/plugin/pastebin.txt +++ b/doc/test/plugin/pastebin.txt @@ -1,8 +1,7 @@ - -pytest_pastebin plugin -====================== submit failure or test session information to a pastebin service. +================================================================= + .. 
contents:: :local: --- a/py/impl/test/dist/dsession.py +++ /dev/null @@ -1,280 +0,0 @@ -import py -from py.impl.test.session import Session -from py.impl.test import outcome -from py.impl.test.dist.nodemanage import NodeManager -queue = py.builtin._tryimport('queue', 'Queue') - -debug_file = None # open('/tmp/loop.log', 'w') -def debug(*args): - if debug_file is not None: - s = " ".join(map(str, args)) - debug_file.write(s+"\n") - debug_file.flush() - -class LoopState(object): - def __init__(self, dsession, colitems): - self.dsession = dsession - self.colitems = colitems - self.exitstatus = None - # loopstate.dowork is False after reschedule events - # because otherwise we might very busily loop - # waiting for a host to become ready. - self.dowork = True - self.shuttingdown = False - self.testsfailed = False - - def __repr__(self): - return "" % ( - self.exitstatus, self.shuttingdown, len(self.colitems)) - - def pytest_runtest_logreport(self, report): - if report.item in self.dsession.item2nodes: - if report.when != "teardown": # otherwise we already managed it - self.dsession.removeitem(report.item, report.node) - if report.failed: - self.testsfailed = True - - def pytest_collectreport(self, report): - if report.passed: - self.colitems.extend(report.result) - - def pytest_testnodeready(self, node): - self.dsession.addnode(node) - - def pytest_testnodedown(self, node, error=None): - pending = self.dsession.removenode(node) - if pending: - if error: - crashitem = pending[0] - debug("determined crashitem", crashitem) - self.dsession.handle_crashitem(crashitem, node) - # XXX recovery handling for "each"? - # currently pending items are not retried - if self.dsession.config.option.dist == "load": - self.colitems.extend(pending[1:]) - - def pytest_rescheduleitems(self, items): - self.colitems.extend(items) - self.dowork = False # avoid busywait - -class DSession(Session): - """ - Session drives the collection and running of tests - and generates test events for reporters. - """ - MAXITEMSPERHOST = 15 - - def __init__(self, config): - self.queue = queue.Queue() - self.node2pending = {} - self.item2nodes = {} - super(DSession, self).__init__(config=config) - - #def pytest_configure(self, __multicall__, config): - # __multicall__.execute() - # try: - # config.getxspecs() - # except config.Error: - # print - # raise config.Error("dist mode %r needs test execution environments, " - # "none found." 
%(config.option.dist)) - - def main(self, colitems): - self.sessionstarts() - self.setup() - exitstatus = self.loop(colitems) - self.teardown() - self.sessionfinishes(exitstatus=exitstatus) - return exitstatus - - def loop_once(self, loopstate): - if loopstate.shuttingdown: - return self.loop_once_shutdown(loopstate) - colitems = loopstate.colitems - if loopstate.dowork and colitems: - self.triggertesting(loopstate.colitems) - colitems[:] = [] - # we use a timeout here so that control-C gets through - while 1: - try: - eventcall = self.queue.get(timeout=2.0) - break - except queue.Empty: - continue - loopstate.dowork = True - - callname, args, kwargs = eventcall - if callname is not None: - call = getattr(self.config.hook, callname) - assert not args - call(**kwargs) - - # termination conditions - if ((loopstate.testsfailed and self.config.option.exitfirst) or - (not self.item2nodes and not colitems and not self.queue.qsize())): - self.triggershutdown() - loopstate.shuttingdown = True - elif not self.node2pending: - loopstate.exitstatus = outcome.EXIT_NOHOSTS - - def loop_once_shutdown(self, loopstate): - # once we are in shutdown mode we dont send - # events other than HostDown upstream - eventname, args, kwargs = self.queue.get() - if eventname == "pytest_testnodedown": - self.config.hook.pytest_testnodedown(**kwargs) - self.removenode(kwargs['node']) - elif eventname == "pytest_runtest_logreport": - # might be some teardown report - self.config.hook.pytest_runtest_logreport(**kwargs) - elif eventname == "pytest_internalerror": - self.config.hook.pytest_internalerror(**kwargs) - loopstate.exitstatus = outcome.EXIT_INTERNALERROR - elif eventname == "pytest__teardown_final_logerror": - self.config.hook.pytest__teardown_final_logerror(**kwargs) - loopstate.exitstatus = outcome.EXIT_TESTSFAILED - if not self.node2pending: - # finished - if loopstate.testsfailed: - loopstate.exitstatus = outcome.EXIT_TESTSFAILED - else: - loopstate.exitstatus = outcome.EXIT_OK - #self.config.pluginmanager.unregister(loopstate) - - def _initloopstate(self, colitems): - loopstate = LoopState(self, colitems) - self.config.pluginmanager.register(loopstate) - return loopstate - - def loop(self, colitems): - try: - loopstate = self._initloopstate(colitems) - loopstate.dowork = False # first receive at least one HostUp events - while 1: - self.loop_once(loopstate) - if loopstate.exitstatus is not None: - exitstatus = loopstate.exitstatus - break - except KeyboardInterrupt: - excinfo = py.code.ExceptionInfo() - self.config.hook.pytest_keyboard_interrupt(excinfo=excinfo) - exitstatus = outcome.EXIT_INTERRUPTED - except: - self.config.pluginmanager.notify_exception() - exitstatus = outcome.EXIT_INTERNALERROR - self.config.pluginmanager.unregister(loopstate) - if exitstatus == 0 and self._testsfailed: - exitstatus = outcome.EXIT_TESTSFAILED - return exitstatus - - def triggershutdown(self): - for node in self.node2pending: - node.shutdown() - - def addnode(self, node): - assert node not in self.node2pending - self.node2pending[node] = [] - - def removenode(self, node): - try: - pending = self.node2pending.pop(node) - except KeyError: - # this happens if we didn't receive a testnodeready event yet - return [] - for item in pending: - l = self.item2nodes[item] - l.remove(node) - if not l: - del self.item2nodes[item] - return pending - - def triggertesting(self, colitems): - colitems = self.filteritems(colitems) - senditems = [] - for next in colitems: - if isinstance(next, py.test.collect.Item): - senditems.append(next) - 
else: - self.config.hook.pytest_collectstart(collector=next) - colrep = self.config.hook.pytest_make_collect_report(collector=next) - self.queueevent("pytest_collectreport", report=colrep) - if self.config.option.dist == "each": - self.senditems_each(senditems) - else: - # XXX assert self.config.option.dist == "load" - self.senditems_load(senditems) - - def queueevent(self, eventname, **kwargs): - self.queue.put((eventname, (), kwargs)) - - def senditems_each(self, tosend): - if not tosend: - return - room = self.MAXITEMSPERHOST - for node, pending in self.node2pending.items(): - room = min(self.MAXITEMSPERHOST - len(pending), room) - sending = tosend[:room] - if sending: - for node, pending in self.node2pending.items(): - node.sendlist(sending) - pending.extend(sending) - for item in sending: - nodes = self.item2nodes.setdefault(item, []) - assert node not in nodes - nodes.append(node) - item.ihook.pytest_itemstart(item=item, node=node) - tosend[:] = tosend[room:] # update inplace - if tosend: - # we have some left, give it to the main loop - self.queueevent("pytest_rescheduleitems", items=tosend) - - def senditems_load(self, tosend): - if not tosend: - return - for node, pending in self.node2pending.items(): - room = self.MAXITEMSPERHOST - len(pending) - if room > 0: - sending = tosend[:room] - node.sendlist(sending) - for item in sending: - #assert item not in self.item2node, ( - # "sending same item %r to multiple " - # "not implemented" %(item,)) - self.item2nodes.setdefault(item, []).append(node) - item.ihook.pytest_itemstart(item=item, node=node) - pending.extend(sending) - tosend[:] = tosend[room:] # update inplace - if not tosend: - break - if tosend: - # we have some left, give it to the main loop - self.queueevent("pytest_rescheduleitems", items=tosend) - - def removeitem(self, item, node): - if item not in self.item2nodes: - raise AssertionError(item, self.item2nodes) - nodes = self.item2nodes[item] - if node in nodes: # the node might have gone down already - nodes.remove(node) - if not nodes: - del self.item2nodes[item] - pending = self.node2pending[node] - pending.remove(item) - - def handle_crashitem(self, item, node): - runner = item.config.pluginmanager.getplugin("runner") - info = "!!! Node %r crashed during running of test %r" %(node, item) - rep = runner.ItemTestReport(item=item, excinfo=info, when="???") - rep.node = node - item.ihook.pytest_runtest_logreport(report=rep) - - def setup(self): - """ setup any neccessary resources ahead of the test run. """ - self.nodemanager = NodeManager(self.config) - self.nodemanager.setup_nodes(putevent=self.queue.put) - if self.config.option.dist == "each": - self.nodemanager.wait_nodesready(5.0) - - def teardown(self): - """ teardown any resources after a test run. """ - self.nodemanager.teardown_nodes() --- a/doc/test/plugin/tmpdir.txt +++ b/doc/test/plugin/tmpdir.txt @@ -1,8 +1,7 @@ - -pytest_tmpdir plugin -==================== provide temporary directories to test functions. +================================================ + .. 
contents:: :local: --- a/testing/pytest/dist/test_dsession.py +++ /dev/null @@ -1,505 +0,0 @@ -from py.impl.test.dist.dsession import DSession -from py.impl.test import outcome -import py -import execnet - -XSpec = execnet.XSpec - -def run(item, node, excinfo=None): - runner = item.config.pluginmanager.getplugin("runner") - rep = runner.ItemTestReport(item=item, - excinfo=excinfo, when="call") - rep.node = node - return rep - -class MockNode: - def __init__(self): - self.sent = [] - - def sendlist(self, items): - self.sent.append(items) - - def shutdown(self): - self._shutdown=True - -def dumpqueue(queue): - while queue.qsize(): - print(queue.get()) - -class TestDSession: - def test_add_remove_node(self, testdir): - item = testdir.getitem("def test_func(): pass") - node = MockNode() - rep = run(item, node) - session = DSession(item.config) - assert not session.node2pending - session.addnode(node) - assert len(session.node2pending) == 1 - session.senditems_load([item]) - pending = session.removenode(node) - assert pending == [item] - assert item not in session.item2nodes - l = session.removenode(node) - assert not l - - def test_senditems_each_and_receive_with_two_nodes(self, testdir): - item = testdir.getitem("def test_func(): pass") - node1 = MockNode() - node2 = MockNode() - session = DSession(item.config) - session.addnode(node1) - session.addnode(node2) - session.senditems_each([item]) - assert session.node2pending[node1] == [item] - assert session.node2pending[node2] == [item] - assert node1 in session.item2nodes[item] - assert node2 in session.item2nodes[item] - session.removeitem(item, node1) - assert session.item2nodes[item] == [node2] - session.removeitem(item, node2) - assert not session.node2pending[node1] - assert not session.item2nodes - - def test_senditems_load_and_receive_one_node(self, testdir): - item = testdir.getitem("def test_func(): pass") - node = MockNode() - rep = run(item, node) - session = DSession(item.config) - session.addnode(node) - session.senditems_load([item]) - assert session.node2pending[node] == [item] - assert session.item2nodes[item] == [node] - session.removeitem(item, node) - assert not session.node2pending[node] - assert not session.item2nodes - - def test_triggertesting_collect(self, testdir): - modcol = testdir.getmodulecol(""" - def test_func(): - pass - """) - session = DSession(modcol.config) - session.triggertesting([modcol]) - name, args, kwargs = session.queue.get(block=False) - assert name == 'pytest_collectreport' - report = kwargs['report'] - assert len(report.result) == 1 - - def test_triggertesting_item(self, testdir): - item = testdir.getitem("def test_func(): pass") - session = DSession(item.config) - node1 = MockNode() - node2 = MockNode() - session.addnode(node1) - session.addnode(node2) - session.triggertesting([item] * (session.MAXITEMSPERHOST*2 + 1)) - sent1 = node1.sent[0] - sent2 = node2.sent[0] - assert sent1 == [item] * session.MAXITEMSPERHOST - assert sent2 == [item] * session.MAXITEMSPERHOST - assert session.node2pending[node1] == sent1 - assert session.node2pending[node2] == sent2 - name, args, kwargs = session.queue.get(block=False) - assert name == "pytest_rescheduleitems" - assert kwargs['items'] == [item] - - def test_keyboardinterrupt(self, testdir): - item = testdir.getitem("def test_func(): pass") - session = DSession(item.config) - def raise_(timeout=None): raise KeyboardInterrupt() - session.queue.get = raise_ - exitstatus = session.loop([]) - assert exitstatus == outcome.EXIT_INTERRUPTED - - def 
test_internalerror(self, testdir): - item = testdir.getitem("def test_func(): pass") - session = DSession(item.config) - def raise_(): raise ValueError() - session.queue.get = raise_ - exitstatus = session.loop([]) - assert exitstatus == outcome.EXIT_INTERNALERROR - - def test_rescheduleevent(self, testdir): - item = testdir.getitem("def test_func(): pass") - session = DSession(item.config) - node = MockNode() - session.addnode(node) - loopstate = session._initloopstate([]) - session.queueevent("pytest_rescheduleitems", items=[item]) - session.loop_once(loopstate) - # check that RescheduleEvents are not immediately - # rescheduled if there are no nodes - assert loopstate.dowork == False - session.queueevent(None) - session.loop_once(loopstate) - session.queueevent(None) - session.loop_once(loopstate) - assert node.sent == [[item]] - session.queueevent("pytest_runtest_logreport", report=run(item, node)) - session.loop_once(loopstate) - assert loopstate.shuttingdown - assert not loopstate.testsfailed - - def test_no_node_remaining_for_tests(self, testdir): - item = testdir.getitem("def test_func(): pass") - # setup a session with one node - session = DSession(item.config) - node = MockNode() - session.addnode(node) - - # setup a HostDown event - session.queueevent("pytest_testnodedown", node=node, error=None) - - loopstate = session._initloopstate([item]) - loopstate.dowork = False - session.loop_once(loopstate) - dumpqueue(session.queue) - assert loopstate.exitstatus == outcome.EXIT_NOHOSTS - - def test_removeitem_from_failing_teardown(self, testdir): - # teardown reports only come in when they signal a failure - # internal session-management should basically ignore them - # XXX probably it'S best to invent a new error hook for - # teardown/setup related failures - modcol = testdir.getmodulecol(""" - def test_one(): - pass - def teardown_function(function): - assert 0 - """) - item1, = modcol.collect() - - # setup a session with two nodes - session = DSession(item1.config) - node1, node2 = MockNode(), MockNode() - session.addnode(node1) - session.addnode(node2) - - # have one test pending for a node that goes down - session.senditems_each([item1]) - nodes = session.item2nodes[item1] - class rep: - failed = True - item = item1 - node = nodes[0] - when = "call" - session.queueevent("pytest_runtest_logreport", report=rep) - reprec = testdir.getreportrecorder(session) - print(session.item2nodes) - loopstate = session._initloopstate([]) - assert len(session.item2nodes[item1]) == 2 - session.loop_once(loopstate) - assert len(session.item2nodes[item1]) == 1 - rep.when = "teardown" - session.queueevent("pytest_runtest_logreport", report=rep) - session.loop_once(loopstate) - assert len(session.item2nodes[item1]) == 1 - - def test_testnodedown_causes_reschedule_pending(self, testdir): - modcol = testdir.getmodulecol(""" - def test_crash(): - assert 0 - def test_fail(): - x - """) - item1, item2 = modcol.collect() - - # setup a session with two nodes - session = DSession(item1.config) - node1, node2 = MockNode(), MockNode() - session.addnode(node1) - session.addnode(node2) - - # have one test pending for a node that goes down - session.senditems_load([item1, item2]) - node = session.item2nodes[item1] [0] - item1.config.option.dist = "load" - session.queueevent("pytest_testnodedown", node=node, error="xyz") - reprec = testdir.getreportrecorder(session) - print(session.item2nodes) - loopstate = session._initloopstate([]) - session.loop_once(loopstate) - - assert loopstate.colitems == [item2] # do not 
reschedule crash item - rep = reprec.matchreport(names="pytest_runtest_logreport") - assert rep.failed - assert rep.item == item1 - assert str(rep.longrepr).find("crashed") != -1 - #assert str(testrep.longrepr).find(node.gateway.spec) != -1 - - def test_testnodeready_adds_to_available(self, testdir): - item = testdir.getitem("def test_func(): pass") - # setup a session with two nodes - session = DSession(item.config) - node1 = MockNode() - session.queueevent("pytest_testnodeready", node=node1) - loopstate = session._initloopstate([item]) - loopstate.dowork = False - assert len(session.node2pending) == 0 - session.loop_once(loopstate) - assert len(session.node2pending) == 1 - - def runthrough(self, item, excinfo=None): - session = DSession(item.config) - node = MockNode() - session.addnode(node) - loopstate = session._initloopstate([item]) - - session.queueevent(None) - session.loop_once(loopstate) - - assert node.sent == [[item]] - ev = run(item, node, excinfo=excinfo) - session.queueevent("pytest_runtest_logreport", report=ev) - session.loop_once(loopstate) - assert loopstate.shuttingdown - session.queueevent("pytest_testnodedown", node=node, error=None) - session.loop_once(loopstate) - dumpqueue(session.queue) - return session, loopstate.exitstatus - - def test_exit_completed_tests_ok(self, testdir): - item = testdir.getitem("def test_func(): pass") - session, exitstatus = self.runthrough(item) - assert exitstatus == outcome.EXIT_OK - - def test_exit_completed_tests_fail(self, testdir): - item = testdir.getitem("def test_func(): 0/0") - session, exitstatus = self.runthrough(item, excinfo="fail") - assert exitstatus == outcome.EXIT_TESTSFAILED - - def test_exit_on_first_failing(self, testdir): - modcol = testdir.getmodulecol(""" - def test_fail(): - assert 0 - def test_pass(): - pass - """) - modcol.config.option.exitfirst = True - session = DSession(modcol.config) - node = MockNode() - session.addnode(node) - items = modcol.config.hook.pytest_make_collect_report(collector=modcol).result - - # trigger testing - this sends tests to the node - session.triggertesting(items) - - # run tests ourselves and produce reports - ev1 = run(items[0], node, "fail") - ev2 = run(items[1], node, None) - session.queueevent("pytest_runtest_logreport", report=ev1) # a failing one - session.queueevent("pytest_runtest_logreport", report=ev2) - # now call the loop - loopstate = session._initloopstate(items) - session.loop_once(loopstate) - assert loopstate.testsfailed - assert loopstate.shuttingdown - - def test_shuttingdown_filters(self, testdir): - item = testdir.getitem("def test_func(): pass") - session = DSession(item.config) - node = MockNode() - session.addnode(node) - loopstate = session._initloopstate([]) - loopstate.shuttingdown = True - reprec = testdir.getreportrecorder(session) - session.queueevent("pytest_runtest_logreport", report=run(item, node)) - session.loop_once(loopstate) - assert not reprec.getcalls("pytest_testnodedown") - session.queueevent("pytest_testnodedown", node=node, error=None) - session.loop_once(loopstate) - assert reprec.getcall('pytest_testnodedown').node == node - - def test_filteritems(self, testdir): - modcol = testdir.getmodulecol(""" - def test_fail(): - assert 0 - def test_pass(): - pass - """) - session = DSession(modcol.config) - - modcol.config.option.keyword = "nothing" - dsel = session.filteritems([modcol]) - assert dsel == [modcol] - items = modcol.collect() - hookrecorder = testdir.getreportrecorder(session).hookrecorder - remaining = session.filteritems(items) - 
assert remaining == [] - - event = hookrecorder.getcalls("pytest_deselected")[-1] - assert event.items == items - - modcol.config.option.keyword = "test_fail" - remaining = session.filteritems(items) - assert remaining == [items[0]] - - event = hookrecorder.getcalls("pytest_deselected")[-1] - assert event.items == [items[1]] - - def test_testnodedown_shutdown_after_completion(self, testdir): - item = testdir.getitem("def test_func(): pass") - session = DSession(item.config) - - node = MockNode() - session.addnode(node) - session.senditems_load([item]) - session.queueevent("pytest_runtest_logreport", report=run(item, node)) - loopstate = session._initloopstate([]) - session.loop_once(loopstate) - assert node._shutdown is True - assert loopstate.exitstatus is None, "loop did not wait for testnodedown" - assert loopstate.shuttingdown - session.queueevent("pytest_testnodedown", node=node, error=None) - session.loop_once(loopstate) - assert loopstate.exitstatus == 0 - - def test_nopending_but_collection_remains(self, testdir): - modcol = testdir.getmodulecol(""" - def test_fail(): - assert 0 - def test_pass(): - pass - """) - session = DSession(modcol.config) - node = MockNode() - session.addnode(node) - - colreport = modcol.config.hook.pytest_make_collect_report(collector=modcol) - item1, item2 = colreport.result - session.senditems_load([item1]) - # node2pending will become empty when the loop sees the report - rep = run(item1, node) - session.queueevent("pytest_runtest_logreport", report=run(item1, node)) - - # but we have a collection pending - session.queueevent("pytest_collectreport", report=colreport) - - loopstate = session._initloopstate([]) - session.loop_once(loopstate) - assert loopstate.exitstatus is None, "loop did not care for collection report" - assert not loopstate.colitems - session.loop_once(loopstate) - assert loopstate.colitems == colreport.result - assert loopstate.exitstatus is None, "loop did not care for colitems" - - def test_dist_some_tests(self, testdir): - p1 = testdir.makepyfile(test_one=""" - def test_1(): - pass - def test_x(): - import py - py.test.skip("aaa") - def test_fail(): - assert 0 - """) - config = testdir.parseconfig('-d', p1, '--tx=popen') - dsession = DSession(config) - hookrecorder = testdir.getreportrecorder(config).hookrecorder - dsession.main([config.getnode(p1)]) - rep = hookrecorder.popcall("pytest_runtest_logreport").report - assert rep.passed - rep = hookrecorder.popcall("pytest_runtest_logreport").report - assert rep.skipped - rep = hookrecorder.popcall("pytest_runtest_logreport").report - assert rep.failed - # see that the node is really down - node = hookrecorder.popcall("pytest_testnodedown").node - assert node.gateway.spec.popen - #XXX eq.geteventargs("pytest_sessionfinish") - -def test_collected_function_causes_remote_skip(testdir): - sub = testdir.mkpydir("testing") - sub.join("test_module.py").write(py.code.Source(""" - import py - path = py.path.local(%r) - if path.check(): - path.remove() - else: - py.test.skip("remote skip") - def test_func(): - pass - def test_func2(): - pass - """ % str(sub.ensure("somefile")))) - result = testdir.runpytest('-v', '--dist=each', '--tx=popen') - result.stdout.fnmatch_lines([ - "*2 skipped*" - ]) - -def test_teardownfails_one_function(testdir): - p = testdir.makepyfile(""" - def test_func(): - pass - def teardown_function(function): - assert 0 - """) - result = testdir.runpytest(p, '--dist=each', '--tx=popen') - result.stdout.fnmatch_lines([ - "*def teardown_function(function):*", - "*1 passed*1 
error*" - ]) - - at py.test.mark.xfail -def test_terminate_on_hangingnode(testdir): - p = testdir.makeconftest(""" - def pytest__teardown_final(session): - if session.nodeid == "my": # running on slave - import time - time.sleep(3) - """) - result = testdir.runpytest(p, '--dist=each', '--tx=popen//id=my') - assert result.duration < 2.0 - result.stdout.fnmatch_lines([ - "*killed*my*", - ]) - - - -def test_session_hooks(testdir): - testdir.makeconftest(""" - import sys - def pytest_sessionstart(session): - sys.pytestsessionhooks = session - def pytest_sessionfinish(session): - f = open(session.nodeid or "master", 'w') - f.write("xy") - f.close() - # let's fail on the slave - if session.nodeid: - raise ValueError(42) - """) - p = testdir.makepyfile(""" - import sys - def test_hello(): - assert hasattr(sys, 'pytestsessionhooks') - """) - result = testdir.runpytest(p, "--dist=each", "--tx=popen//id=my1") - result.stdout.fnmatch_lines([ - "*ValueError*", - "*1 passed*", - ]) - assert result.ret - d = result.parseoutcomes() - assert d['passed'] == 1 - assert testdir.tmpdir.join("my1").check() - assert testdir.tmpdir.join("master").check() - -def test_funcarg_teardown_failure(testdir): - p = testdir.makepyfile(""" - def pytest_funcarg__myarg(request): - def teardown(val): - raise ValueError(val) - return request.cached_setup(setup=lambda: 42, teardown=teardown, - scope="module") - def test_hello(myarg): - pass - """) - result = testdir.runpytest(p, "-n1") - assert result.ret - result.stdout.fnmatch_lines([ - "*ValueError*42*", - "*1 passed*1 error*", - ]) - - --- /dev/null +++ b/doc/test/dist.html @@ -0,0 +1,18 @@ + + + + + + + + + + + --- a/testing/pytest/dist/conftest.py +++ /dev/null @@ -1,4 +0,0 @@ -try: - import execnet -except ImportError: - collect_ignore = ['.'] --- a/testing/pytest/dist/test_nodemanage.py +++ /dev/null @@ -1,127 +0,0 @@ -import py -from py.impl.test.dist.nodemanage import NodeManager - -class pytest_funcarg__mysetup: - def __init__(self, request): - basetemp = request.config.mktemp( - "mysetup-%s" % request.function.__name__, - numbered=True) - self.source = basetemp.mkdir("source") - self.dest = basetemp.mkdir("dest") - request.getfuncargvalue("_pytest") - -class TestNodeManager: - @py.test.mark.xfail - def test_rsync_roots_no_roots(self, testdir, mysetup): - mysetup.source.ensure("dir1", "file1").write("hello") - config = testdir.reparseconfig([source]) - nodemanager = NodeManager(config, ["popen//chdir=%s" % mysetup.dest]) - assert nodemanager.config.topdir == source == config.topdir - nodemanager.rsync_roots() - p, = nodemanager.gwmanager.multi_exec("import os ; channel.send(os.getcwd())").receive_each() - p = py.path.local(p) - py.builtin.print_("remote curdir", p) - assert p == mysetup.dest.join(config.topdir.basename) - assert p.join("dir1").check() - assert p.join("dir1", "file1").check() - - def test_popen_nodes_are_ready(self, testdir): - nodemanager = NodeManager(testdir.parseconfig( - "--tx", "3*popen")) - - nodemanager.setup_nodes([].append) - nodemanager.wait_nodesready(timeout=10.0) - - def test_popen_rsync_subdir(self, testdir, mysetup): - source, dest = mysetup.source, mysetup.dest - dir1 = mysetup.source.mkdir("dir1") - dir2 = dir1.mkdir("dir2") - dir2.ensure("hello") - for rsyncroot in (dir1, source): - dest.remove() - nodemanager = NodeManager(testdir.parseconfig( - "--tx", "popen//chdir=%s" % dest, - "--rsyncdir", rsyncroot, - source, - )) - assert nodemanager.config.topdir == source - nodemanager.rsync_roots() - if rsyncroot == source: - dest = 
dest.join("source") - assert dest.join("dir1").check() - assert dest.join("dir1", "dir2").check() - assert dest.join("dir1", "dir2", 'hello').check() - nodemanager.gwmanager.exit() - - def test_init_rsync_roots(self, testdir, mysetup): - source, dest = mysetup.source, mysetup.dest - dir2 = source.ensure("dir1", "dir2", dir=1) - source.ensure("dir1", "somefile", dir=1) - dir2.ensure("hello") - source.ensure("bogusdir", "file") - source.join("conftest.py").write(py.code.Source(""" - rsyncdirs = ['dir1/dir2'] - """)) - session = testdir.reparseconfig([source]).initsession() - nodemanager = NodeManager(session.config, ["popen//chdir=%s" % dest]) - nodemanager.rsync_roots() - assert dest.join("dir2").check() - assert not dest.join("dir1").check() - assert not dest.join("bogus").check() - - def test_rsyncignore(self, testdir, mysetup): - source, dest = mysetup.source, mysetup.dest - dir2 = source.ensure("dir1", "dir2", dir=1) - dir5 = source.ensure("dir5", "dir6", "bogus") - dirf = source.ensure("dir5", "file") - dir2.ensure("hello") - source.join("conftest.py").write(py.code.Source(""" - rsyncdirs = ['dir1', 'dir5'] - rsyncignore = ['dir1/dir2', 'dir5/dir6'] - """)) - session = testdir.reparseconfig([source]).initsession() - nodemanager = NodeManager(session.config, - ["popen//chdir=%s" % dest]) - nodemanager.rsync_roots() - assert dest.join("dir1").check() - assert not dest.join("dir1", "dir2").check() - assert dest.join("dir5","file").check() - assert not dest.join("dir6").check() - - def test_optimise_popen(self, testdir, mysetup): - source, dest = mysetup.source, mysetup.dest - specs = ["popen"] * 3 - source.join("conftest.py").write("rsyncdirs = ['a']") - source.ensure('a', dir=1) - config = testdir.reparseconfig([source]) - nodemanager = NodeManager(config, specs) - nodemanager.rsync_roots() - for gwspec in nodemanager.gwmanager.specs: - assert gwspec._samefilesystem() - assert not gwspec.chdir - - def test_setup_DEBUG(self, mysetup, testdir): - source = mysetup.source - specs = ["popen"] * 2 - source.join("conftest.py").write("rsyncdirs = ['a']") - source.ensure('a', dir=1) - config = testdir.reparseconfig([source, '--debug']) - assert config.option.debug - nodemanager = NodeManager(config, specs) - reprec = testdir.getreportrecorder(config).hookrecorder - nodemanager.setup_nodes(putevent=[].append) - for spec in nodemanager.gwmanager.specs: - l = reprec.getcalls("pytest_trace") - assert l - nodemanager.teardown_nodes() - - def test_ssh_setup_nodes(self, specssh, testdir): - testdir.makepyfile(__init__="", test_x=""" - def test_one(): - pass - """) - reprec = testdir.inline_run("-d", "--rsyncdir=%s" % testdir.tmpdir, - "--tx", specssh, testdir.tmpdir) - rep, = reprec.getreports("pytest_runtest_logreport") - assert rep.passed - --- a/doc/test/plugin/index.txt +++ b/doc/test/plugin/index.txt @@ -8,7 +8,7 @@ mark_ generic mechanism for marking pyth pdb_ interactive debugging with the Python Debugger. -figleaf_ (external) for testing with Titus' figleaf coverage module +figleaf_ report test coverage using the 'figleaf' package. coverage_ (external) for testing with Ned's coverage module @@ -21,13 +21,15 @@ recwarn_ helpers for asserting deprecati tmpdir_ provide temporary directories to test functions. -testing domains -=============== +other testing domains, misc +=========================== oejskit_ (external) run javascript tests in real life browsers django_ (external) for testing django applications +xdist_ loop on failing tests, distribute test runs to CPUs and hosts. 
+ genscript_ generate standalone test script to be distributed along with an application. --- a/testing/pytest/dist/test_txnode.py +++ /dev/null @@ -1,148 +0,0 @@ - -import py -import execnet -from py.impl.test.dist.txnode import TXNode -queue = py.builtin._tryimport("queue", "Queue") -Queue = queue.Queue - -class EventQueue: - def __init__(self, registry, queue=None): - if queue is None: - queue = Queue() - self.queue = queue - registry.register(self) - - def geteventargs(self, eventname, timeout=2.0): - events = [] - while 1: - try: - eventcall = self.queue.get(timeout=timeout) - except queue.Empty: - #print "node channel", self.node.channel - #print "remoteerror", self.node.channel._getremoteerror() - py.builtin.print_("seen events", events) - raise IOError("did not see %r events" % (eventname)) - else: - name, args, kwargs = eventcall - assert isinstance(name, str) - if name == eventname: - if args: - return args - return kwargs - events.append(name) - if name == "pytest_internalerror": - py.builtin.print_(str(kwargs["excrepr"])) - -class MySetup: - def __init__(self, request): - self.id = 0 - self.request = request - - def geteventargs(self, eventname, timeout=2.0): - eq = EventQueue(self.config.pluginmanager, self.queue) - return eq.geteventargs(eventname, timeout=timeout) - - def makenode(self, config=None): - if config is None: - testdir = self.request.getfuncargvalue("testdir") - config = testdir.reparseconfig([]) - self.config = config - self.queue = Queue() - self.xspec = execnet.XSpec("popen") - self.gateway = execnet.makegateway(self.xspec) - self.id += 1 - self.gateway.id = str(self.id) - self.node = TXNode(self.gateway, self.config, putevent=self.queue.put) - assert not self.node.channel.isclosed() - return self.node - - def xfinalize(self): - if hasattr(self, 'node'): - gw = self.node.gateway - py.builtin.print_("exiting:", gw) - gw.exit() - -def pytest_funcarg__mysetup(request): - mysetup = MySetup(request) - #pyfuncitem.addfinalizer(mysetup.finalize) - return mysetup - -def test_node_hash_equality(mysetup): - node = mysetup.makenode() - node2 = mysetup.makenode() - assert node != node2 - assert node == node - assert not (node != node) - -class TestMasterSlaveConnection: - def test_crash_invalid_item(self, mysetup): - node = mysetup.makenode() - node.send(123) # invalid item - kwargs = mysetup.geteventargs("pytest_testnodedown") - assert kwargs['node'] is node - assert "Not properly terminated" in str(kwargs['error']) - - def test_crash_killed(self, testdir, mysetup): - if not hasattr(py.std.os, 'kill'): - py.test.skip("no os.kill") - item = testdir.getitem(""" - def test_func(): - import os - os.kill(os.getpid(), 9) - """) - node = mysetup.makenode(item.config) - node.send(item) - kwargs = mysetup.geteventargs("pytest_testnodedown") - assert kwargs['node'] is node - assert "Not properly terminated" in str(kwargs['error']) - - def test_node_down(self, mysetup): - node = mysetup.makenode() - node.shutdown() - kwargs = mysetup.geteventargs("pytest_testnodedown") - assert kwargs['node'] is node - assert not kwargs['error'] - node.callback(node.ENDMARK) - excinfo = py.test.raises(IOError, - "mysetup.geteventargs('testnodedown', timeout=0.01)") - - def test_send_on_closed_channel(self, testdir, mysetup): - item = testdir.getitem("def test_func(): pass") - node = mysetup.makenode(item.config) - node.channel.close() - py.test.raises(IOError, "node.send(item)") - #ev = self.getcalls(pytest_internalerror) - #assert ev.excinfo.errisinstance(IOError) - - def test_send_one(self, 
testdir, mysetup): - item = testdir.getitem("def test_func(): pass") - node = mysetup.makenode(item.config) - node.send(item) - kwargs = mysetup.geteventargs("pytest_runtest_logreport") - rep = kwargs['report'] - assert rep.passed - py.builtin.print_(rep) - assert rep.item == item - - def test_send_some(self, testdir, mysetup): - items = testdir.getitems(""" - def test_pass(): - pass - def test_fail(): - assert 0 - def test_skip(): - import py - py.test.skip("x") - """) - node = mysetup.makenode(items[0].config) - for item in items: - node.send(item) - for outcome in "passed failed skipped".split(): - kwargs = mysetup.geteventargs("pytest_runtest_logreport") - report = kwargs['report'] - assert getattr(report, outcome) - - node.sendlist(items) - for outcome in "passed failed skipped".split(): - rep = mysetup.geteventargs("pytest_runtest_logreport")['report'] - assert getattr(rep, outcome) --- a/doc/test/plugin/monkeypatch.txt +++ b/doc/test/plugin/monkeypatch.txt @@ -1,8 +1,7 @@ - -pytest_monkeypatch plugin -========================= safely patch object attributes, dicts and environment variables. +================================================================ + .. contents:: :local: --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -6,8 +6,8 @@ plugins = [ ('advanced python testing', 'skipping mark pdb figleaf coverage ' 'monkeypatch capture recwarn tmpdir',), - ('testing domains', - 'oejskit django genscript'), + ('other testing domains, misc', + 'oejskit django xdist genscript'), ('reporting and failure logging', 'pastebin logxml xmlresult resultlog terminal',), ('other testing conventions', @@ -22,7 +22,6 @@ plugins = [ externals = { 'oejskit': "run javascript tests in real life browsers", - 'figleaf': "for testing with Titus' figleaf coverage module", 'django': "for testing django applications", 'coverage': "for testing with Ned's coverage module ", 'xmlresult': "for generating xml reports " @@ -159,7 +158,7 @@ class PluginDoc(RestWriter): config.pluginmanager.import_plugin(name) plugin = config.pluginmanager.getplugin(name) assert plugin is not None, plugin - + print plugin doc = plugin.__doc__.strip() i = doc.find("\n") if i == -1: @@ -169,12 +168,13 @@ class PluginDoc(RestWriter): oneliner = doc[:i].strip() moduledoc = doc[i+1:].strip() - self.name = plugin.__name__.split(".")[-1] + self.name = oneliner # plugin.__name__.split(".")[-1] self.oneliner = oneliner self.moduledoc = moduledoc - self.h1("%s plugin" % self.name) # : %s" %(self.name, self.oneliner)) - self.Print(self.oneliner) + #self.h1("%s plugin" % self.name) # : %s" %(self.name, self.oneliner)) + self.h1(oneliner) + #self.Print(self.oneliner) self.Print() self.Print(".. contents::") self.Print(" :local:") --- a/setup.py +++ b/setup.py @@ -55,8 +55,6 @@ def main(): 'py.impl.path', 'py.impl.process', 'py.impl.test', - 'py.impl.test.dist', - 'py.impl.test.looponfail', ], zip_safe=False, ) --- a/conftest.py +++ b/conftest.py @@ -5,14 +5,7 @@ pytest_plugins = '_pytest doctest pytest collect_ignore = ['build', 'doc/_build'] - rsyncdirs = ['conftest.py', 'bin', 'py', 'doc', 'testing'] -try: - import execnet -except ImportError: - pass -else: - rsyncdirs.append(str(py.path.local(execnet.__file__).dirpath())) import py def pytest_addoption(parser): @@ -20,42 +13,16 @@ def pytest_addoption(parser): group.addoption('--sshhost', action="store", dest="sshhost", default=None, help=("ssh xspec for ssh functional tests. 
")) - group.addoption('--gx', - action="append", dest="gspecs", default=None, - help=("add a global test environment, XSpec-syntax. ")) group.addoption('--runslowtests', action="store_true", dest="runslowtests", default=False, help=("run slow tests")) -def pytest_funcarg__specssh(request): - return getspecssh(request.config) -def getgspecs(config): - return [execnet.XSpec(spec) - for spec in config.getvalueorskip("gspecs")] - -# configuration information for tests -def getgspecs(config): - return [execnet.XSpec(spec) - for spec in config.getvalueorskip("gspecs")] - -def getspecssh(config): - xspecs = getgspecs(config) - for spec in xspecs: - if spec.ssh: - if not py.path.local.sysfind("ssh"): - py.test.skip("command not found: ssh") - return spec - py.test.skip("need '--gx ssh=...'") - -def getsocketspec(config): - xspecs = getgspecs(config) - for spec in xspecs: - if spec.socket: - return spec - py.test.skip("need '--gx socket=...'") - - +def pytest_funcarg__sshhost(request): + val = request.config.getvalue("sshhost") + if val: + return val + py.test.skip("need --sshhost option") def pytest_generate_tests(metafunc): multi = getattr(metafunc.function, 'multi', None) if multi is not None: --- a/testing/plugin/test_pytest_default.py +++ b/testing/plugin/test_pytest_default.py @@ -1,21 +1,6 @@ import py from py.plugin.pytest_default import pytest_report_iteminfo -def test_implied_different_sessions(testdir, tmpdir): - def x(*args): - config = testdir.reparseconfig([tmpdir] + list(args)) - try: - config.pluginmanager.do_configure(config) - except ValueError: - return Exception - return getattr(config._sessionclass, '__name__', None) - assert x() == None - py.test.importorskip("execnet") - assert x('-d') == 'DSession' - assert x('--dist=each') == 'DSession' - assert x('-n3') == 'DSession' - assert x('-f') == 'LooponfailingSession' - def test_plugin_specify(testdir): testdir.chdir() config = py.test.raises(ImportError, """ @@ -40,50 +25,6 @@ def test_exclude(testdir): assert result.ret == 0 assert result.stdout.fnmatch_lines(["*1 passed*"]) -class TestDistOptions: - def setup_method(self, method): - py.test.importorskip("execnet") - def test_getxspecs(self, testdir): - config = testdir.parseconfigure("--tx=popen", "--tx", "ssh=xyz") - xspecs = config.getxspecs() - assert len(xspecs) == 2 - print(xspecs) - assert xspecs[0].popen - assert xspecs[1].ssh == "xyz" - - def test_xspecs_multiplied(self, testdir): - xspecs = testdir.parseconfigure("--tx=3*popen",).getxspecs() - assert len(xspecs) == 3 - assert xspecs[1].popen - - def test_getrsyncdirs(self, testdir): - config = testdir.parseconfigure('--rsyncdir=' + str(testdir.tmpdir)) - roots = config.getrsyncdirs() - assert len(roots) == 1 + 1 # pylib itself - assert testdir.tmpdir in roots - - def test_getrsyncdirs_with_conftest(self, testdir): - p = py.path.local() - for bn in 'x y z'.split(): - p.mkdir(bn) - testdir.makeconftest(""" - rsyncdirs= 'x', - """) - config = testdir.parseconfigure(testdir.tmpdir, '--rsyncdir=y', '--rsyncdir=z') - roots = config.getrsyncdirs() - assert len(roots) == 3 + 1 # pylib itself - assert py.path.local('y') in roots - assert py.path.local('z') in roots - assert testdir.tmpdir.join('x') in roots - - def test_dist_options(self, testdir): - config = testdir.parseconfigure("-n 2") - assert config.option.dist == "load" - assert config.option.tx == ['popen'] * 2 - - config = testdir.parseconfigure("-d") - assert config.option.dist == "load" - def test_pytest_report_iteminfo(): class FakeItem(object): --- 
a/doc/test/plugin/mark.txt +++ b/doc/test/plugin/mark.txt @@ -1,8 +1,7 @@ - -pytest_mark plugin -================== generic mechanism for marking python functions. +=============================================== + .. contents:: :local: --- a/doc/test/plugin/doctest.txt +++ b/doc/test/plugin/doctest.txt @@ -1,8 +1,7 @@ - -pytest_doctest plugin -===================== collect and execute doctests from modules and test files. +========================================================= + .. contents:: :local: --- a/testing/plugin/test_pytest_pdb.py +++ b/testing/plugin/test_pytest_pdb.py @@ -43,14 +43,3 @@ class TestPDB: child.expect("1 failed") if child.isalive(): child.wait() - - def test_dist_incompatibility_messages(self, testdir): - py.test.importorskip("execnet") - Error = py.test.config.Error - py.test.raises(Error, "testdir.parseconfigure('--pdb', '--looponfail')") - result = testdir.runpytest("--pdb", "-n", "3") - assert result.ret != 0 - assert "incompatible" in result.stderr.str() - result = testdir.runpytest("--pdb", "-d", "--tx", "popen") - assert result.ret != 0 - assert "incompatible" in result.stderr.str() --- a/doc/test/plugin/logxml.txt +++ b/doc/test/plugin/logxml.txt @@ -1,8 +1,7 @@ - -pytest_logxml plugin -==================== logging of test results in JUnit-XML format, for use with Hudson +================================================================ + .. contents:: :local: --- a/testing/pytest/dist/test_mypickle.py +++ /dev/null @@ -1,254 +0,0 @@ - -import py -import sys -import execnet - -Queue = py.builtin._tryimport('queue', 'Queue').Queue - -from py.impl.test.dist.mypickle import ImmutablePickler, PickleChannel -from py.impl.test.dist.mypickle import UnpickleError, makekey -# first let's test some basic functionality - -def pytest_generate_tests(metafunc): - if 'picklemod' in metafunc.funcargnames: - import pickle - metafunc.addcall(funcargs={'picklemod': pickle}) - try: - import cPickle - except ImportError: - pass - else: - metafunc.addcall(funcargs={'picklemod': cPickle}) - elif "obj" in metafunc.funcargnames and "proto" in metafunc.funcargnames: - a1 = A() - a2 = A() - a2.a1 = a1 - for proto in (0,1,2, -1): - for obj in {1:2}, [1,2,3], a1, a2: - metafunc.addcall(funcargs=dict(obj=obj, proto=proto)) - -def test_underlying_basic_pickling_mechanisms(picklemod): - f1 = py.io.BytesIO() - f2 = py.io.BytesIO() - - pickler1 = picklemod.Pickler(f1) - unpickler1 = picklemod.Unpickler(f2) - - pickler2 = picklemod.Pickler(f2) - unpickler2 = picklemod.Unpickler(f1) - - #pickler1.memo = unpickler1.memo = {} - #pickler2.memo = unpickler2.memo = {} - - d = {} - - pickler1.dump(d) - f1.seek(0) - d_other = unpickler2.load() - - # translate unpickler2 memo to pickler2 - pickler2.memo = dict([(id(obj), (int(x), obj)) - for x, obj in unpickler2.memo.items()]) - - pickler2.dump(d_other) - f2.seek(0) - - unpickler1.memo = dict([(makekey(x), y) - for x, y in pickler1.memo.values()]) - d_back = unpickler1.load() - assert d is d_back - - -class A: - pass - - -def test_pickle_and_back_IS_same(obj, proto): - p1 = ImmutablePickler(uneven=False, protocol=proto) - p2 = ImmutablePickler(uneven=True, protocol=proto) - s1 = p1.dumps(obj) - d2 = p2.loads(s1) - s2 = p2.dumps(d2) - obj_back = p1.loads(s2) - assert obj is obj_back - -def test_pickling_twice_before_unpickling(): - p1 = ImmutablePickler(uneven=False) - p2 = ImmutablePickler(uneven=True) - - a1 = A() - a2 = A() - a3 = A() - a3.a1 = a1 - a2.a1 = a1 - s1 = p1.dumps(a1) - a1.a3 = a3 - s2 = p1.dumps(a2) - other_a1 = p2.loads(s1) - 
other_a2 = p2.loads(s2) - back_a1 = p1.loads(p2.dumps(other_a1)) - other_a3 = p2.loads(p1.dumps(a3)) - back_a3 = p1.loads(p2.dumps(other_a3)) - back_a2 = p1.loads(p2.dumps(other_a2)) - back_a1 = p1.loads(p2.dumps(other_a1)) - assert back_a1 is a1 - assert back_a2 is a2 - -def test_pickling_concurrently(): - p1 = ImmutablePickler(uneven=False) - p2 = ImmutablePickler(uneven=True) - - a1 = A() - a1.hasattr = 42 - a2 = A() - - s1 = p1.dumps(a1) - s2 = p2.dumps(a2) - other_a1 = p2.loads(s1) - other_a2 = p1.loads(s2) - a1_back = p1.loads(p2.dumps(other_a1)) - -def test_self_memoize(): - p1 = ImmutablePickler(uneven=False) - a1 = A() - p1.selfmemoize(a1) - x = p1.loads(p1.dumps(a1)) - assert x is a1 - -TESTTIMEOUT = 2.0 -class TestPickleChannelFunctional: - def setup_class(cls): - cls.gw = execnet.PopenGateway() - cls.gw.remote_exec( - "import py ; py.path.local(%r).pyimport()" %(__file__) - ) - cls.gw.remote_init_threads(5) - # we need the remote test code to import - # the same test module here - - def test_popen_send_instance(self): - channel = self.gw.remote_exec(""" - from py.impl.test.dist.mypickle import PickleChannel - channel = PickleChannel(channel) - from testing.pytest.dist.test_mypickle import A - a1 = A() - a1.hello = 10 - channel.send(a1) - a2 = channel.receive() - channel.send(a2 is a1) - """) - channel = PickleChannel(channel) - a_received = channel.receive() - assert isinstance(a_received, A) - assert a_received.hello == 10 - channel.send(a_received) - remote_a2_is_a1 = channel.receive() - assert remote_a2_is_a1 - - def test_send_concurrent(self): - channel = self.gw.remote_exec(""" - from py.impl.test.dist.mypickle import PickleChannel - channel = PickleChannel(channel) - from testing.pytest.dist.test_mypickle import A - l = [A() for i in range(10)] - channel.send(l) - other_l = channel.receive() - channel.send((l, other_l)) - channel.send(channel.receive()) - channel.receive() - """) - channel = PickleChannel(channel) - l = [A() for i in range(10)] - channel.send(l) - other_l = channel.receive() - channel.send(other_l) - ret = channel.receive() - assert ret[0] is other_l - assert ret[1] is l - back = channel.receive() - assert other_l is other_l - channel.send(None) - - #s1 = p1.dumps(a1) - #s2 = p2.dumps(a2) - #other_a1 = p2.loads(s1) - #other_a2 = p1.loads(s2) - #a1_back = p1.loads(p2.dumps(other_a1)) - - def test_popen_with_callback(self): - channel = self.gw.remote_exec(""" - from py.impl.test.dist.mypickle import PickleChannel - channel = PickleChannel(channel) - from testing.pytest.dist.test_mypickle import A - a1 = A() - a1.hello = 10 - channel.send(a1) - a2 = channel.receive() - channel.send(a2 is a1) - """) - channel = PickleChannel(channel) - queue = Queue() - channel.setcallback(queue.put) - a_received = queue.get(timeout=TESTTIMEOUT) - assert isinstance(a_received, A) - assert a_received.hello == 10 - channel.send(a_received) - #remote_a2_is_a1 = queue.get(timeout=TESTTIMEOUT) - #assert remote_a2_is_a1 - - def test_popen_with_callback_with_endmarker(self): - channel = self.gw.remote_exec(""" - from py.impl.test.dist.mypickle import PickleChannel - channel = PickleChannel(channel) - from testing.pytest.dist.test_mypickle import A - a1 = A() - a1.hello = 10 - channel.send(a1) - a2 = channel.receive() - channel.send(a2 is a1) - """) - channel = PickleChannel(channel) - queue = Queue() - channel.setcallback(queue.put, endmarker=-1) - - a_received = queue.get(timeout=TESTTIMEOUT) - assert isinstance(a_received, A) - assert a_received.hello == 10 - 
channel.send(a_received) - remote_a2_is_a1 = queue.get(timeout=TESTTIMEOUT) - assert remote_a2_is_a1 - endmarker = queue.get(timeout=TESTTIMEOUT) - assert endmarker == -1 - - def test_popen_with_callback_with_endmarker_and_unpickling_error(self): - channel = self.gw.remote_exec(""" - from py.impl.test.dist.mypickle import PickleChannel - channel = PickleChannel(channel) - from testing.pytest.dist.test_mypickle import A - a1 = A() - channel.send(a1) - channel.send(a1) - """) - channel = PickleChannel(channel) - queue = Queue() - a = channel.receive() - channel._ipickle._unpicklememo.clear() - channel.setcallback(queue.put, endmarker=-1) - next = queue.get(timeout=TESTTIMEOUT) - assert next == -1 - error = channel._getremoteerror() - assert isinstance(error, UnpickleError) - - def test_popen_with_various_methods(self): - channel = self.gw.remote_exec(""" - from py.impl.test.dist.mypickle import PickleChannel - channel = PickleChannel(channel) - channel.receive() - """) - channel = PickleChannel(channel) - assert not channel.isclosed() - assert not channel._getremoteerror() - channel.send(2) - channel.waitclose(timeout=2) - - --- a/testing/pytest/looponfail/test_remote.py +++ /dev/null @@ -1,151 +0,0 @@ -import py -py.test.importorskip("execnet") -from py.impl.test.looponfail.remote import LooponfailingSession, LoopState, RemoteControl - -class TestRemoteControl: - def test_nofailures(self, testdir): - item = testdir.getitem("def test_func(): pass\n") - control = RemoteControl(item.config) - control.setup() - failures = control.runsession() - assert not failures - - def test_failures_somewhere(self, testdir): - item = testdir.getitem("def test_func(): assert 0\n") - control = RemoteControl(item.config) - control.setup() - failures = control.runsession() - assert failures - control.setup() - item.fspath.write("def test_func(): assert 1\n") - pyc = item.fspath.new(ext=".pyc") - if pyc.check(): - pyc.remove() - failures = control.runsession(failures) - assert not failures - - def test_failure_change(self, testdir): - modcol = testdir.getitem(""" - def test_func(): - assert 0 - """) - control = RemoteControl(modcol.config) - control.setup() - failures = control.runsession() - assert failures - control.setup() - modcol.fspath.write(py.code.Source(""" - def test_func(): - assert 1 - def test_new(): - assert 0 - """)) - pyc = modcol.fspath.new(ext=".pyc") - if pyc.check(): - pyc.remove() - failures = control.runsession(failures) - assert not failures - control.setup() - failures = control.runsession() - assert failures - assert str(failures).find("test_new") != -1 - -class TestLooponFailing: - def test_looponfail_from_fail_to_ok(self, testdir): - modcol = testdir.getmodulecol(""" - def test_one(): - x = 0 - assert x == 1 - def test_two(): - assert 1 - """) - session = LooponfailingSession(modcol.config) - loopstate = LoopState() - session.remotecontrol.setup() - session.loop_once(loopstate) - assert len(loopstate.colitems) == 1 - - modcol.fspath.write(py.code.Source(""" - def test_one(): - x = 15 - assert x == 15 - def test_two(): - assert 1 - """)) - assert session.statrecorder.check() - session.loop_once(loopstate) - assert not loopstate.colitems - - def test_looponfail_from_one_to_two_tests(self, testdir): - modcol = testdir.getmodulecol(""" - def test_one(): - assert 0 - """) - session = LooponfailingSession(modcol.config) - loopstate = LoopState() - session.remotecontrol.setup() - loopstate.colitems = [] - session.loop_once(loopstate) - assert len(loopstate.colitems) == 1 - - 
modcol.fspath.write(py.code.Source(""" - def test_one(): - assert 1 # passes now - def test_two(): - assert 0 # new and fails - """)) - assert session.statrecorder.check() - session.loop_once(loopstate) - assert len(loopstate.colitems) == 0 - - session.loop_once(loopstate) - assert len(loopstate.colitems) == 1 - - def test_looponfail_removed_test(self, testdir): - modcol = testdir.getmodulecol(""" - def test_one(): - assert 0 - def test_two(): - assert 0 - """) - session = LooponfailingSession(modcol.config) - loopstate = LoopState() - session.remotecontrol.setup() - loopstate.colitems = [] - session.loop_once(loopstate) - assert len(loopstate.colitems) == 2 - - modcol.fspath.write(py.code.Source(""" - def test_xxx(): # renamed test - assert 0 - def test_two(): - assert 1 # pass now - """)) - assert session.statrecorder.check() - session.loop_once(loopstate) - assert len(loopstate.colitems) == 0 - - session.loop_once(loopstate) - assert len(loopstate.colitems) == 1 - - - def test_looponfail_functional_fail_to_ok(self, testdir): - p = testdir.makepyfile(""" - def test_one(): - x = 0 - assert x == 1 - """) - child = testdir.spawn_pytest("-f %s" % p) - child.expect("def test_one") - child.expect("x == 1") - child.expect("1 failed") - child.expect("### LOOPONFAILING ####") - child.expect("waiting for changes") - p.write(py.code.Source(""" - def test_one(): - x = 1 - assert x == 1 - """)) - child.expect(".*1 passed.*") - child.kill(15) - --- a/testing/pytest/dist/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- /dev/null +++ b/doc/test/plugin/xdist.txt @@ -0,0 +1,181 @@ + +loop on failing tests, distribute test runs to CPUs and hosts. +============================================================== + + +.. contents:: + :local: + +The `pytest-xdist`_ plugin extends py.test with some unique +test execution modes: + +* Looponfail: run your tests in a subprocess. After it finishes py.test + waits until a file in your project changes and then re-runs only the + failing tests. This is repeated until all tests pass after which again + a full run is performed. + +* Load-balancing: if you have multiple CPUs or hosts you can use + those for a combined test run. This allows to speed up + development or to use special resources of remote machines. + +* Multi-Platform coverage: you can specify different Python interpreters + or different platforms and run tests in parallel on all of them. + +Before running tests remotely, ``py.test`` efficiently synchronizes your +program source code to the remote place. All test results +are reported back and displayed to your local test session. +You may specify different Python versions and interpreters. + + +Usage examples +--------------------- + +Speed up test runs by sending tests to multiple CPUs ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +To send tests to multiple CPUs, type:: + + py.test -n NUM + +Especially for longer running tests or tests requiring +a lot of IO this can lead to considerable speed ups. + + +Running tests in a Python subprocess ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +To instantiate a python2.4 sub process and send tests to it, you may type:: + + py.test -d --tx popen//python=python2.4 + +This will start a subprocess which is run with the "python2.4" +Python interpreter, found in your system binary lookup path. + +If you prefix the --tx option value like this:: + + --tx 3*popen//python=python2.4 + +then three subprocesses would be created and tests +will be load-balanced across these three processes. 
+ + +Sending tests to remote SSH accounts ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Suppose you have a package ``mypkg`` which contains some +tests that you can successfully run locally. And you +have a ssh-reachable machine ``myhost``. Then +you can ad-hoc distribute your tests by typing:: + + py.test -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg + +This will synchronize your ``mypkg`` package directory +to an remote ssh account and then locally collect tests +and send them to remote places for execution. + +You can specify multiple ``--rsyncdir`` directories +to be sent to the remote side. + +**NOTE:** For py.test to collect and send tests correctly +you not only need to make sure all code and tests +directories are rsynced, but that any test (sub) directory +also has an ``__init__.py`` file because internally +py.test references tests as a fully qualified python +module path. **You will otherwise get strange errors** +during setup of the remote side. + +Sending tests to remote Socket Servers ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Download the single-module `socketserver.py`_ Python program +and run it like this:: + + python socketserver.py + +It will tell you that it starts listening on the default +port. You can now on your home machine specify this +new socket host with something like this:: + + py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg + + +.. _`atonce`: + +Running tests on many platforms at once ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +The basic command to run tests on multiple platforms is:: + + py.test --dist=each --tx=spec1 --tx=spec2 + +If you specify a windows host, an OSX host and a Linux +environment this command will send each tests to all +platforms - and report back failures from all platforms +at once. The specifications strings use the `xspec syntax`_. + +.. _`xspec syntax`: http://codespeak.net/execnet/trunk/basics.html#xspec + +.. _`socketserver.py`: http://codespeak.net/svn/py/dist/py/execnet/script/socketserver.py + +.. _`execnet`: http://codespeak.net/execnet + +Specifying test exec environments in a conftest.py ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Instead of specifying command line options, you can +put options values in a ``conftest.py`` file like this:: + + pytest_option_tx = ['ssh=myhost//python=python2.5', 'popen//python=python2.5'] + pytest_option_dist = True + +Any commandline ``--tx`` specifictions will add to the list of available execution +environments. + +Specifying "rsync" dirs in a conftest.py ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +In your ``mypkg/conftest.py`` you may specify directories to synchronise +or to exclude:: + + rsyncdirs = ['.', '../plugins'] + rsyncignore = ['_cache'] + +These directory specifications are relative to the directory +where the ``conftest.py`` is found. + +command line options +-------------------- + + +``-f, --looponfail`` + run tests in subprocess, wait for modified files and re-run failing test set until all pass. +``-n numprocesses`` + shortcut for '--dist=load --tx=NUM*popen' +``--boxed`` + box each test run in a separate process (unix) +``--dist=distmode`` + set mode for distributing tests to exec environments. + + each: send each test to each available environment. + + load: send each test to available environment. + + (default) no: run tests inprocess, don't distribute. +``--tx=xspec`` + add a test execution environment. 
some examples: --tx popen//python=python2.5 --tx socket=192.168.1.102:8888 --tx ssh=user at codespeak.net//chdir=testcache +``-d`` + load-balance tests. shortcut for '--dist=load' +``--rsyncdir=dir1`` + add directory for rsyncing to remote tx nodes. + +Start improving this plugin in 30 seconds +========================================= + + +1. Download `plugin.py`_ plugin source code +2. put it somewhere as ``plugin.py`` into your import path +3. a subsequent ``py.test`` run will use your local version + +Checkout customize_, other plugins_ or `get in contact`_. + +.. include:: links.txt --- a/bin-for-dist/test_install.py +++ b/bin-for-dist/test_install.py @@ -175,7 +175,8 @@ def test_cmdline_entrypoints(monkeypatch for script in unversioned_scripts: assert script in points -def test_slave_popen_needs_no_pylib(testdir, venv): +def test_slave_popen_needs_no_pylib(testdir, venv, pytestconfig): + pytestconfig.pluginmanager.skipifmissing("xdist") venv.ensure() #xxx execnet optimizes popen #ch = venv.makegateway().remote_exec("import execnet") @@ -192,8 +193,10 @@ def test_slave_popen_needs_no_pylib(test "*1 passed*" ]) -def test_slave_needs_no_execnet(testdir, specssh): - gw = execnet.makegateway(specssh) +def test_slave_needs_no_execnet(testdir, sshhost, pytestconfig): + pytestconfig.pluginmanager.skipifmissing("xdist") + xspec = "ssh=%s" % sshhost + gw = execnet.makegateway("ssh=%s" % sshhost) ch = gw.remote_exec(""" import os, subprocess subprocess.call(["virtualenv", "--no-site-packages", "subdir"]) @@ -207,7 +210,7 @@ def test_slave_needs_no_execnet(testdir, e = sys.exc_info()[1] py.test.skip("could not prepare ssh slave:%s" % str(e)) gw.exit() - newspec = "%s//python=%s//chdir=%s" % (specssh, path, chdir) + newspec = "%s//python=%s//chdir=%s" % (xspec, path, chdir) gw = execnet.makegateway(newspec) ch = gw.remote_exec("import execnet") py.test.raises(ch.RemoteError, ch.waitclose) --- a/testing/pytest/test_deprecated_api.py +++ b/testing/pytest/test_deprecated_api.py @@ -265,42 +265,6 @@ def test_config_cmdline_options(recwarn, recwarn.pop(DeprecationWarning) assert config.option.gdest == 17 -def test_dist_conftest_options(testdir): - p1 = testdir.tmpdir.ensure("dir", 'p1.py') - p1.dirpath("__init__.py").write("") - p1.dirpath("conftest.py").write(py.code.Source(""" - import py - from py.builtin import print_ - print_("importing conftest", __file__) - Option = py.test.config.Option - option = py.test.config.addoptions("someopt", - Option('--someopt', action="store_true", - dest="someopt", default=False)) - dist_rsync_roots = ['../dir'] - print_("added options", option) - print_("config file seen from conftest", py.test.config) - """)) - p1.write(py.code.Source(""" - import py - from %s import conftest - from py.builtin import print_ - def test_1(): - print_("config from test_1", py.test.config) - print_("conftest from test_1", conftest.__file__) - print_("test_1: py.test.config.option.someopt", py.test.config.option.someopt) - print_("test_1: conftest", conftest) - print_("test_1: conftest.option.someopt", conftest.option.someopt) - assert conftest.option.someopt - """ % p1.dirpath().purebasename )) - result = testdir.runpytest('-d', '--tx=popen', p1, '--someopt') - assert result.ret == 0 - result.stderr.fnmatch_lines([ - "*Deprecation*pytest_addoptions*", - ]) - result.stdout.fnmatch_lines([ - "*1 passed*", - ]) - def test_conftest_non_python_items(recwarn, testdir): testdir.makepyfile(conftest=""" import py --- a/testing/pytest/looponfail/test_util.py +++ /dev/null @@ -1,61 +0,0 @@ 
-import py -from py.impl.test.looponfail.util import StatRecorder - -def test_filechange(tmpdir): - tmp = tmpdir - hello = tmp.ensure("hello.py") - sd = StatRecorder([tmp]) - changed = sd.check() - assert not changed - - hello.write("world") - changed = sd.check() - assert changed - - tmp.ensure("new.py") - changed = sd.check() - assert changed - - tmp.join("new.py").remove() - changed = sd.check() - assert changed - - tmp.join("a", "b", "c.py").ensure() - changed = sd.check() - assert changed - - tmp.join("a", "c.txt").ensure() - changed = sd.check() - assert changed - changed = sd.check() - assert not changed - - tmp.join("a").remove() - changed = sd.check() - assert changed - -def test_pycremoval(tmpdir): - tmp = tmpdir - hello = tmp.ensure("hello.py") - sd = StatRecorder([tmp]) - changed = sd.check() - assert not changed - - pycfile = hello + "c" - pycfile.ensure() - changed = sd.check() - assert not changed - - hello.write("world") - changed = sd.check() - assert not pycfile.check() - - -def test_waitonchange(tmpdir, monkeypatch): - tmp = tmpdir - sd = StatRecorder([tmp]) - - l = [True, False] - monkeypatch.setattr(StatRecorder, 'check', lambda self: l.pop()) - sd.waitonchange(checkinterval=0.2) - assert not l --- a/py/impl/test/dist/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- a/doc/test/plugin/skipping.txt +++ b/doc/test/plugin/skipping.txt @@ -1,8 +1,7 @@ - -pytest_skipping plugin -====================== advanced skipping for python test functions, classes or modules. +================================================================ + .. contents:: :local: --- a/py/impl/test/dist/mypickle.py +++ /dev/null @@ -1,187 +0,0 @@ -""" - - Pickling support for two processes that want to exchange - *immutable* object instances. Immutable in the sense - that the receiving side of an object can modify its - copy but when it sends it back the original sending - side will continue to see its unmodified version - (and no actual state will go over the wire). - - This module also implements an experimental - execnet pickling channel using this idea. - -""" - -import py -import sys, os, struct -#debug = open("log-mypickle-%d" % os.getpid(), 'w') - -if sys.version_info >= (3,0): - makekey = lambda x: x - fromkey = lambda x: x - from pickle import _Pickler as Pickler - from pickle import _Unpickler as Unpickler -else: - makekey = str - fromkey = int - from pickle import Pickler, Unpickler - - -class MyPickler(Pickler): - """ Pickler with a custom memoize() - to take care of unique ID creation. - See the usage in ImmutablePickler - XXX we could probably extend Pickler - and Unpickler classes to directly - update the other'S memos. - """ - def __init__(self, file, protocol, uneven): - Pickler.__init__(self, file, protocol) - self.uneven = uneven - - def memoize(self, obj): - if self.fast: - return - assert id(obj) not in self.memo - memo_len = len(self.memo) - key = memo_len * 2 + self.uneven - self.write(self.put(key)) - self.memo[id(obj)] = key, obj - - #if sys.version_info < (3,0): - # def save_string(self, obj, pack=struct.pack): - # obj = unicode(obj) - # self.save_unicode(obj, pack=pack) - # Pickler.dispatch[str] = save_string - -class ImmutablePickler: - def __init__(self, uneven, protocol=0): - """ ImmutablePicklers are instantiated in Pairs. - The two sides need to create unique IDs - while pickling their objects. This is - done by using either even or uneven - numbers, depending on the instantiation - parameter. 
- """ - self._picklememo = {} - self._unpicklememo = {} - self._protocol = protocol - self.uneven = uneven and 1 or 0 - - def selfmemoize(self, obj): - # this is for feeding objects to ourselfes - # which be the case e.g. if you want to pickle - # from a forked process back to the original - f = py.io.BytesIO() - pickler = MyPickler(f, self._protocol, uneven=self.uneven) - pickler.memo = self._picklememo - pickler.memoize(obj) - self._updateunpicklememo() - - def dumps(self, obj): - f = py.io.BytesIO() - pickler = MyPickler(f, self._protocol, uneven=self.uneven) - pickler.memo = self._picklememo - pickler.dump(obj) - if obj is not None: - self._updateunpicklememo() - #print >>debug, "dumped", obj - #print >>debug, "picklememo", self._picklememo - return f.getvalue() - - def loads(self, string): - f = py.io.BytesIO(string) - unpickler = Unpickler(f) - unpickler.memo = self._unpicklememo - res = unpickler.load() - self._updatepicklememo() - #print >>debug, "loaded", res - #print >>debug, "unpicklememo", self._unpicklememo - return res - - def _updatepicklememo(self): - for x, obj in self._unpicklememo.items(): - self._picklememo[id(obj)] = (fromkey(x), obj) - - def _updateunpicklememo(self): - for key,obj in self._picklememo.values(): - key = makekey(key) - if key in self._unpicklememo: - assert self._unpicklememo[key] is obj - self._unpicklememo[key] = obj - -NO_ENDMARKER_WANTED = object() - -class UnpickleError(Exception): - """ Problems while unpickling. """ - def __init__(self, formatted): - self.formatted = formatted - Exception.__init__(self, formatted) - def __str__(self): - return self.formatted - -class PickleChannel(object): - """ PickleChannels wrap execnet channels - and allow to send/receive by using - "immutable pickling". - """ - _unpicklingerror = None - def __init__(self, channel): - self._channel = channel - # we use the fact that each side of a - # gateway connection counts with uneven - # or even numbers depending on which - # side it is (for the purpose of creating - # unique ids - which is what we need it here for) - uneven = channel.gateway._channelfactory.count % 2 - self._ipickle = ImmutablePickler(uneven=uneven) - self.RemoteError = channel.RemoteError - - def send(self, obj): - pickled_obj = self._ipickle.dumps(obj) - self._channel.send(pickled_obj) - - def receive(self): - pickled_obj = self._channel.receive() - return self._unpickle(pickled_obj) - - def _unpickle(self, pickled_obj): - if isinstance(pickled_obj, self._channel.__class__): - return pickled_obj - return self._ipickle.loads(pickled_obj) - - def _getremoteerror(self): - return self._unpicklingerror or self._channel._getremoteerror() - - def close(self): - return self._channel.close() - - def isclosed(self): - return self._channel.isclosed() - - def waitclose(self, timeout=None): - return self._channel.waitclose(timeout=timeout) - - def setcallback(self, callback, endmarker=NO_ENDMARKER_WANTED): - if endmarker is NO_ENDMARKER_WANTED: - def unpickle_callback(pickled_obj): - obj = self._unpickle(pickled_obj) - callback(obj) - self._channel.setcallback(unpickle_callback) - return - uniqueendmarker = object() - def unpickle_callback(pickled_obj): - if pickled_obj is uniqueendmarker: - return callback(endmarker) - try: - obj = self._unpickle(pickled_obj) - except KeyboardInterrupt: - raise - except: - excinfo = py.code.ExceptionInfo() - formatted = str(excinfo.getrepr(showlocals=True,funcargs=True)) - self._unpicklingerror = UnpickleError(formatted) - callback(endmarker) - else: - callback(obj) - 
self._channel.setcallback(unpickle_callback, uniqueendmarker) From commits-noreply at bitbucket.org Wed Jan 13 16:18:13 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 13 Jan 2010 15:18:13 +0000 (UTC) Subject: [py-svn] py-trunk commit 626e56a02369: flatten test directory hierarchy and merge smaller into larger files Message-ID: <20100113151813.926A37EE78@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263395870 -3600 # Node ID 626e56a0236940356418324bf7d10e5e4ea63631 # Parent 3fee6fd40486fd5b234a5fc6bf61ef1aa03c1c6d flatten test directory hierarchy and merge smaller into larger files --- a/testing/pytest/test_config.py +++ /dev/null @@ -1,248 +0,0 @@ -import py -from py.impl.test.collect import RootCollector - - -class TestConfigCmdlineParsing: - def test_parser_addoption_default_env(self, testdir, monkeypatch): - import os - config = testdir.Config() - group = config._parser.getgroup("hello") - - monkeypatch.setitem(os.environ, 'PYTEST_OPTION_OPTION1', 'True') - group.addoption("--option1", action="store_true") - assert group.options[0].default == True - - monkeypatch.setitem(os.environ, 'PYTEST_OPTION_OPTION2', 'abc') - group.addoption("--option2", action="store", default="x") - assert group.options[1].default == "abc" - - monkeypatch.setitem(os.environ, 'PYTEST_OPTION_OPTION3', '32') - group.addoption("--option3", action="store", type="int") - assert group.options[2].default == 32 - - group.addoption("--option4", action="store", type="int") - assert group.options[3].default == ("NO", "DEFAULT") - - def test_parser_addoption_default_conftest(self, testdir, monkeypatch): - import os - testdir.makeconftest("option_verbose=True") - config = testdir.parseconfig() - assert config.option.verbose - - def test_parsing_again_fails(self, testdir): - config = testdir.reparseconfig([testdir.tmpdir]) - py.test.raises(AssertionError, "config.parse([])") - - -class TestConfigTmpdir: - def test_getbasetemp(self, testdir): - config = testdir.Config() - config.basetemp = "hello" - config.getbasetemp() == "hello" - - def test_mktemp(self, testdir): - config = testdir.Config() - config.basetemp = testdir.mkdir("hello") - tmp = config.mktemp("world") - assert tmp.relto(config.basetemp) == "world" - tmp = config.mktemp("this", numbered=True) - assert tmp.relto(config.basetemp).startswith("this") - tmp2 = config.mktemp("this", numbered=True) - assert tmp2.relto(config.basetemp).startswith("this") - assert tmp2 != tmp - - def test_reparse(self, testdir): - config2 = testdir.reparseconfig([]) - config3 = testdir.reparseconfig([]) - assert config2.getbasetemp() != config3.getbasetemp() - assert not config2.getbasetemp().relto(config3.getbasetemp()) - assert not config3.getbasetemp().relto(config2.getbasetemp()) - -class TestConfigAPI: - - def test_config_getvalue_honours_conftest(self, testdir): - testdir.makepyfile(conftest="x=1") - testdir.mkdir("sub").join("conftest.py").write("x=2 ; y = 3") - config = testdir.parseconfig() - o = testdir.tmpdir - assert config.getvalue("x") == 1 - assert config.getvalue("x", o.join('sub')) == 2 - py.test.raises(KeyError, "config.getvalue('y')") - config = testdir.reparseconfig([str(o.join('sub'))]) - assert config.getvalue("x") == 2 - assert config.getvalue("y") == 3 - assert config.getvalue("x", o) == 1 - py.test.raises(KeyError, 'config.getvalue("y", o)') - - def test_config_getvalueorskip(self, testdir): - from 
py.impl.test.outcome import Skipped - config = testdir.parseconfig() - py.test.raises(Skipped, "config.getvalueorskip('hello')") - verbose = config.getvalueorskip("verbose") - assert verbose == config.option.verbose - config.option.hello = None - py.test.raises(Skipped, "config.getvalueorskip('hello')") - - def test_config_overwrite(self, testdir): - o = testdir.tmpdir - o.ensure("conftest.py").write("x=1") - config = testdir.reparseconfig([str(o)]) - assert config.getvalue('x') == 1 - config.option.x = 2 - assert config.getvalue('x') == 2 - config = testdir.reparseconfig([str(o)]) - assert config.getvalue('x') == 1 - - def test_getconftest_pathlist(self, testdir, tmpdir): - somepath = tmpdir.join("x", "y", "z") - p = tmpdir.join("conftest.py") - p.write("pathlist = ['.', %r]" % str(somepath)) - config = testdir.reparseconfig([p]) - assert config.getconftest_pathlist('notexist') is None - pl = config.getconftest_pathlist('pathlist') - print(pl) - assert len(pl) == 2 - assert pl[0] == tmpdir - assert pl[1] == somepath - - def test_setsessionclass_and_initsession(self, testdir): - config = testdir.Config() - class Session1: - def __init__(self, config): - self.config = config - config.setsessionclass(Session1) - session = config.initsession() - assert isinstance(session, Session1) - assert session.config is config - py.test.raises(ValueError, "config.setsessionclass(Session1)") - - -class TestConfigApi_getinitialnodes: - def test_onedir(self, testdir): - config = testdir.reparseconfig([testdir.tmpdir]) - colitems = config.getinitialnodes() - assert len(colitems) == 1 - col = colitems[0] - assert isinstance(col, py.test.collect.Directory) - for col in col.listchain(): - assert col.config is config - - def test_twodirs(self, testdir, tmpdir): - config = testdir.reparseconfig([tmpdir, tmpdir]) - colitems = config.getinitialnodes() - assert len(colitems) == 2 - col1, col2 = colitems - assert col1.name == col2.name - assert col1.parent == col2.parent - - def test_curdir_and_subdir(self, testdir, tmpdir): - a = tmpdir.ensure("a", dir=1) - config = testdir.reparseconfig([tmpdir, a]) - colitems = config.getinitialnodes() - assert len(colitems) == 2 - col1, col2 = colitems - assert col1.name == tmpdir.basename - assert col2.name == 'a' - for col in colitems: - for subcol in col.listchain(): - assert col.config is config - - def test_global_file(self, testdir, tmpdir): - x = tmpdir.ensure("x.py") - config = testdir.reparseconfig([x]) - col, = config.getinitialnodes() - assert isinstance(col, py.test.collect.Module) - assert col.name == 'x.py' - assert col.parent.name == tmpdir.basename - assert isinstance(col.parent.parent, RootCollector) - for col in col.listchain(): - assert col.config is config - - def test_global_dir(self, testdir, tmpdir): - x = tmpdir.ensure("a", dir=1) - config = testdir.reparseconfig([x]) - col, = config.getinitialnodes() - assert isinstance(col, py.test.collect.Directory) - print(col.listchain()) - assert col.name == 'a' - assert isinstance(col.parent, RootCollector) - assert col.config is config - - def test_pkgfile(self, testdir, tmpdir): - x = tmpdir.ensure("x.py") - tmpdir.ensure("__init__.py") - config = testdir.reparseconfig([x]) - col, = config.getinitialnodes() - assert isinstance(col, py.test.collect.Module) - assert col.name == 'x.py' - assert col.parent.name == x.dirpath().basename - assert isinstance(col.parent.parent.parent, RootCollector) - for col in col.listchain(): - assert col.config is config - -class TestConfig_gettopdir: - def test_gettopdir(self, 
testdir): - from py.impl.test.config import gettopdir - tmp = testdir.tmpdir - assert gettopdir([tmp]) == tmp - topdir = gettopdir([tmp.join("hello"), tmp.join("world")]) - assert topdir == tmp - somefile = tmp.ensure("somefile.py") - assert gettopdir([somefile]) == tmp - - def test_gettopdir_pypkg(self, testdir): - from py.impl.test.config import gettopdir - tmp = testdir.tmpdir - a = tmp.ensure('a', dir=1) - b = tmp.ensure('a', 'b', '__init__.py') - c = tmp.ensure('a', 'b', 'c.py') - Z = tmp.ensure('Z', dir=1) - assert gettopdir([c]) == a - assert gettopdir([c, Z]) == tmp - assert gettopdir(["%s::xyc" % c]) == a - assert gettopdir(["%s::xyc::abc" % c]) == a - assert gettopdir(["%s::xyc" % c, "%s::abc" % Z]) == tmp - -def test_options_on_small_file_do_not_blow_up(testdir): - def runfiletest(opts): - reprec = testdir.inline_run(*opts) - passed, skipped, failed = reprec.countoutcomes() - assert failed == 2 - assert skipped == passed == 0 - path = testdir.makepyfile(""" - def test_f1(): assert 0 - def test_f2(): assert 0 - """) - - for opts in ([], ['-l'], ['-s'], ['--tb=no'], ['--tb=short'], - ['--tb=long'], ['--fulltrace'], ['--nomagic'], - ['--traceconfig'], ['-v'], ['-v', '-v']): - runfiletest(opts + [path]) - -def test_ensuretemp(recwarn): - #py.test.deprecated_call(py.test.ensuretemp, 'hello') - d1 = py.test.ensuretemp('hello') - d2 = py.test.ensuretemp('hello') - assert d1 == d2 - assert d1.check(dir=1) - -def test_preparse_ordering(testdir, monkeypatch): - pkg_resources = py.test.importorskip("pkg_resources") - def my_iter(name): - assert name == "pytest11" - class EntryPoint: - name = "mytestplugin" - def load(self): - class PseudoPlugin: - x = 42 - return PseudoPlugin() - return iter([EntryPoint()]) - monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) - testdir.makeconftest(""" - pytest_plugins = "mytestplugin", - """) - monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin") - config = testdir.parseconfig() - plugin = config.pluginmanager.getplugin("mytestplugin") - assert plugin.x == 42 - --- /dev/null +++ b/testing/test_conftesthandle.py @@ -0,0 +1,134 @@ +import py +from py.impl.test.conftesthandle import Conftest + +def pytest_generate_tests(metafunc): + if "basedir" in metafunc.funcargnames: + metafunc.addcall(param="global") + metafunc.addcall(param="inpackage") + +def pytest_funcarg__basedir(request): + def basedirmaker(request): + basedir = d = request.getfuncargvalue("tmpdir") + d.ensure("adir/conftest.py").write("a=1 ; Directory = 3") + d.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5") + if request.param == "inpackage": + d.ensure("adir/__init__.py") + d.ensure("adir/b/__init__.py") + return d + return request.cached_setup(lambda: basedirmaker(request), extrakey=request.param) + +def ConftestWithSetinitial(path): + conftest = Conftest() + conftest.setinitial([path]) + return conftest + +class TestConftestValueAccessGlobal: + def test_basic_init(self, basedir): + conftest = Conftest() + conftest.setinitial([basedir.join("adir")]) + assert conftest.rget("a") == 1 + + def test_onimport(self, basedir): + l = [] + conftest = Conftest(onimport=l.append) + conftest.setinitial([basedir.join("adir")]) + assert len(l) == 1 + assert conftest.rget("a") == 1 + assert conftest.rget("b", basedir.join("adir", "b")) == 2 + assert len(l) == 2 + + def test_immediate_initialiation_and_incremental_are_the_same(self, basedir): + conftest = Conftest() + snap0 = len(conftest._path2confmods) + conftest.getconftestmodules(basedir) + snap1 = len(conftest._path2confmods) + #assert 
len(conftest._path2confmods) == snap1 + 1 + conftest.getconftestmodules(basedir.join('adir')) + assert len(conftest._path2confmods) == snap1 + 1 + conftest.getconftestmodules(basedir.join('b')) + assert len(conftest._path2confmods) == snap1 + 2 + + def test_default_has_lower_prio(self, basedir): + conftest = ConftestWithSetinitial(basedir.join("adir")) + assert conftest.rget('Directory') == 3 + #assert conftest.lget('Directory') == py.test.collect.Directory + + def test_value_access_not_existing(self, basedir): + conftest = ConftestWithSetinitial(basedir) + py.test.raises(KeyError, "conftest.rget('a')") + #py.test.raises(KeyError, "conftest.lget('a')") + + def test_value_access_by_path(self, basedir): + conftest = ConftestWithSetinitial(basedir) + assert conftest.rget("a", basedir.join('adir')) == 1 + #assert conftest.lget("a", basedir.join('adir')) == 1 + assert conftest.rget("a", basedir.join('adir', 'b')) == 1.5 + #assert conftest.lget("a", basedir.join('adir', 'b')) == 1 + #assert conftest.lget("b", basedir.join('adir', 'b')) == 2 + #assert py.test.raises(KeyError, + # 'conftest.lget("b", basedir.join("a"))' + #) + + def test_value_access_with_init_one_conftest(self, basedir): + conftest = ConftestWithSetinitial(basedir.join('adir')) + assert conftest.rget("a") == 1 + #assert conftest.lget("a") == 1 + + def test_value_access_with_init_two_conftests(self, basedir): + conftest = ConftestWithSetinitial(basedir.join("adir", "b")) + conftest.rget("a") == 1.5 + #conftest.lget("a") == 1 + #conftest.lget("b") == 1 + + def test_value_access_with_confmod(self, basedir): + topdir = basedir.join("adir", "b") + topdir.ensure("xx", dir=True) + conftest = ConftestWithSetinitial(topdir) + mod, value = conftest.rget_with_confmod("a", topdir) + assert value == 1.5 + path = py.path.local(mod.__file__) + assert path.dirpath() == basedir.join("adir", "b") + assert path.purebasename == "conftest" + +def test_conftestcutdir(testdir): + conf = testdir.makeconftest("") + p = testdir.mkdir("x") + conftest = Conftest(confcutdir=p) + conftest.setinitial([testdir.tmpdir]) + l = conftest.getconftestmodules(p) + assert len(l) == 0 + l = conftest.getconftestmodules(conf.dirpath()) + assert len(l) == 0 + assert conf not in conftest._conftestpath2mod + # but we can still import a conftest directly + conftest.importconftest(conf) + l = conftest.getconftestmodules(conf.dirpath()) + assert l[0].__file__.startswith(str(conf)) + # and all sub paths get updated properly + l = conftest.getconftestmodules(p) + assert len(l) == 1 + assert l[0].__file__.startswith(str(conf)) + +def test_conftestcutdir_inplace_considered(testdir): + conf = testdir.makeconftest("") + conftest = Conftest(confcutdir=conf.dirpath()) + conftest.setinitial([conf.dirpath()]) + l = conftest.getconftestmodules(conf.dirpath()) + assert len(l) == 1 + assert l[0].__file__.startswith(str(conf)) + +def test_setinitial_confcut(testdir): + conf = testdir.makeconftest("") + sub = testdir.mkdir("sub") + sub.chdir() + for opts in (["--confcutdir=%s" % sub, sub], + [sub, "--confcutdir=%s" % sub], + ["--confcutdir=.", sub], + [sub, "--confcutdir", sub], + [str(sub), "--confcutdir", "."], + ): + conftest = Conftest() + conftest.setinitial(opts) + assert conftest._confcutdir == sub + assert conftest.getconftestmodules(sub) == [] + assert conftest.getconftestmodules(conf.dirpath()) == [] --- a/testing/pytest/test_outcome.py +++ /dev/null @@ -1,71 +0,0 @@ - -import py -import sys - -class TestRaises: - def test_raises(self): - py.test.raises(ValueError, "int('qwe')") - - 
def test_raises_exec(self): - py.test.raises(ValueError, "a,x = []") - - def test_raises_syntax_error(self): - py.test.raises(SyntaxError, "qwe qwe qwe") - - def test_raises_function(self): - py.test.raises(ValueError, int, 'hello') - -def test_pytest_exit(): - try: - py.test.exit("hello") - except: - excinfo = py.code.ExceptionInfo() - assert excinfo.errisinstance(KeyboardInterrupt) - -def test_exception_printing_skip(): - try: - py.test.skip("hello") - except Exception: - excinfo = py.code.ExceptionInfo() - s = excinfo.exconly(tryshort=True) - assert s.startswith("Skipped") - -def test_importorskip(): - from py.impl.test.outcome import Skipped, importorskip - assert importorskip == py.test.importorskip - try: - sys = importorskip("sys") - assert sys == py.std.sys - #path = py.test.importorskip("os.path") - #assert path == py.std.os.path - py.test.raises(Skipped, "py.test.importorskip('alskdj')") - py.test.raises(SyntaxError, "py.test.importorskip('x y z')") - py.test.raises(SyntaxError, "py.test.importorskip('x=y')") - path = importorskip("py", minversion=".".join(py.__version__)) - mod = py.std.types.ModuleType("hello123") - mod.__version__ = "1.3" - py.test.raises(Skipped, """ - py.test.importorskip("hello123", minversion="5.0") - """) - except Skipped: - print(py.code.ExceptionInfo()) - py.test.fail("spurious skip") - -def test_importorskip_imports_last_module_part(): - import os - ospath = py.test.importorskip("os.path") - assert os.path == ospath - - -def test_pytest_cmdline_main(testdir): - p = testdir.makepyfile(""" - import sys - sys.path.insert(0, %r) - import py - def test_hello(): - assert 1 - if __name__ == '__main__': - py.test.cmdline.main([__file__]) - """ % (str(py._pydir.dirpath()))) - import subprocess - subprocess.check_call([sys.executable, str(p)]) --- a/testing/pytest/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ - -pytest_plugins = "skipping", "pytester", "tmpdir" - --- /dev/null +++ b/testing/root/test_py_imports.py @@ -0,0 +1,69 @@ +import py +import types +import sys + +def checksubpackage(name): + obj = getattr(py, name) + if hasattr(obj, '__map__'): # isinstance(obj, Module): + keys = dir(obj) + assert len(keys) > 0 + print (obj.__map__) + for name in obj.__map__: + assert hasattr(obj, name), (obj, name) + +def test_dir(): + for name in dir(py): + if not name.startswith('_'): + yield checksubpackage, name + +def test_virtual_module_identity(): + from py import path as path1 + from py import path as path2 + assert path1 is path2 + from py.path import local as local1 + from py.path import local as local2 + assert local1 is local2 + +def test_importall(): + base = py._pydir.join("impl") + nodirs = [ + base.join('test', 'testing', 'data'), + base.join('path', 'gateway',), + base.join('code', 'oldmagic.py'), + base.join('compat', 'testing'), + ] + if sys.version_info >= (3,0): + nodirs.append(base.join('code', '_assertionold.py')) + else: + nodirs.append(base.join('code', '_assertionnew.py')) + + def recurse(p): + return p.check(dotfile=0) and p.basename != "attic" + + for p in base.visit('*.py', recurse): + if p.basename == '__init__.py': + continue + relpath = p.new(ext='').relto(base) + if base.sep in relpath: # not py/*.py itself + for x in nodirs: + if p == x or p.relto(x): + break + else: + relpath = relpath.replace(base.sep, '.') + modpath = 'py.impl.%s' % relpath + check_import(modpath) + +def check_import(modpath): + py.builtin.print_("checking import", modpath) + assert __import__(modpath) + +def test_all_resolves(): + seen = py.builtin.set([py]) + lastlength 
= None + while len(seen) != lastlength: + lastlength = len(seen) + for item in py.builtin.frozenset(seen): + for value in item.__dict__.values(): + if isinstance(value, type(py.test)): + seen.add(value) + --- a/testing/test_py_imports.py +++ /dev/null @@ -1,69 +0,0 @@ -import py -import types -import sys - -def checksubpackage(name): - obj = getattr(py, name) - if hasattr(obj, '__map__'): # isinstance(obj, Module): - keys = dir(obj) - assert len(keys) > 0 - print (obj.__map__) - for name in obj.__map__: - assert hasattr(obj, name), (obj, name) - -def test_dir(): - for name in dir(py): - if not name.startswith('_'): - yield checksubpackage, name - -def test_virtual_module_identity(): - from py import path as path1 - from py import path as path2 - assert path1 is path2 - from py.path import local as local1 - from py.path import local as local2 - assert local1 is local2 - -def test_importall(): - base = py._pydir.join("impl") - nodirs = [ - base.join('test', 'testing', 'data'), - base.join('path', 'gateway',), - base.join('code', 'oldmagic.py'), - base.join('compat', 'testing'), - ] - if sys.version_info >= (3,0): - nodirs.append(base.join('code', '_assertionold.py')) - else: - nodirs.append(base.join('code', '_assertionnew.py')) - - def recurse(p): - return p.check(dotfile=0) and p.basename != "attic" - - for p in base.visit('*.py', recurse): - if p.basename == '__init__.py': - continue - relpath = p.new(ext='').relto(base) - if base.sep in relpath: # not py/*.py itself - for x in nodirs: - if p == x or p.relto(x): - break - else: - relpath = relpath.replace(base.sep, '.') - modpath = 'py.impl.%s' % relpath - check_import(modpath) - -def check_import(modpath): - py.builtin.print_("checking import", modpath) - assert __import__(modpath) - -def test_all_resolves(): - seen = py.builtin.set([py]) - lastlength = None - while len(seen) != lastlength: - lastlength = len(seen) - for item in py.builtin.frozenset(seen): - for value in item.__dict__.values(): - if isinstance(value, type(py.test)): - seen.add(value) - --- a/testing/pytest/test_compat.py +++ /dev/null @@ -1,53 +0,0 @@ -from __future__ import generators -import py -from py.impl.test.compat import TestCase -from py.impl.test.outcome import Failed - -class TestCompatTestCaseSetupSemantics(TestCase): - globlist = [] - - def setUp(self): - self.__dict__.setdefault('l', []).append(42) - self.globlist.append(self) - - def tearDown(self): - self.l.pop() - - def test_issetup(self): - l = self.l - assert len(l) == 1 - assert l[-1] == 42 - #self.checkmultipleinstances() - - def test_issetup2(self): - l = self.l - assert len(l) == 1 - assert l[-1] == 42 - #self.checkmultipleinstances() - - #def checkmultipleinstances(self): - # for x,y in zip(self.globlist, self.globlist[1:]): - # assert x is not y - -class TestCompatAssertions(TestCase): - nameparamdef = { - 'failUnlessEqual,assertEqual,assertEquals': ('1, 1', '1, 0'), - 'assertNotEquals,failIfEqual': ('0, 1', '0,0'), - 'failUnless,assert_': ('1', 'None'), - 'failIf': ('0', '1'), - } - - sourcelist = [] - for names, (paramok, paramfail) in nameparamdef.items(): - for name in names.split(','): - source = """ - def test_%(name)s(self): - self.%(name)s(%(paramok)s) - #self.%(name)s(%(paramfail)s) - - def test_%(name)s_failing(self): - self.assertRaises(Failed, - self.%(name)s, %(paramfail)s) - """ % locals() - co = py.code.Source(source).compile() - exec(co) --- /dev/null +++ b/testing/test_collect.py @@ -0,0 +1,238 @@ +import py + +class TestCollector: + def test_collect_versus_item(self): + from 
py.impl.test.collect import Collector, Item + assert not issubclass(Collector, Item) + assert not issubclass(Item, Collector) + + def test_check_equality(self, testdir): + modcol = testdir.getmodulecol(""" + def test_pass(): pass + def test_fail(): assert 0 + """) + fn1 = modcol.collect_by_name("test_pass") + assert isinstance(fn1, py.test.collect.Function) + fn2 = modcol.collect_by_name("test_pass") + assert isinstance(fn2, py.test.collect.Function) + + assert fn1 == fn2 + assert fn1 != modcol + if py.std.sys.version_info < (3, 0): + assert cmp(fn1, fn2) == 0 + assert hash(fn1) == hash(fn2) + + fn3 = modcol.collect_by_name("test_fail") + assert isinstance(fn3, py.test.collect.Function) + assert not (fn1 == fn3) + assert fn1 != fn3 + + for fn in fn1,fn2,fn3: + assert fn != 3 + assert fn != modcol + assert fn != [1,2,3] + assert [1,2,3] != fn + assert modcol != fn + + def test_getparent(self, testdir): + modcol = testdir.getmodulecol(""" + class TestClass: + def test_foo(): + pass + """) + cls = modcol.collect_by_name("TestClass") + fn = cls.collect_by_name("()").collect_by_name("test_foo") + + parent = fn.getparent(py.test.collect.Module) + assert parent is modcol + + parent = fn.getparent(py.test.collect.Function) + assert parent is fn + + parent = fn.getparent(py.test.collect.Class) + assert parent is cls + + + def test_getcustomfile_roundtrip(self, testdir): + hello = testdir.makefile(".xxx", hello="world") + testdir.makepyfile(conftest=""" + import py + class CustomFile(py.test.collect.File): + pass + class MyDirectory(py.test.collect.Directory): + def collect(self): + return [CustomFile(self.fspath.join("hello.xxx"), parent=self)] + def pytest_collect_directory(path, parent): + return MyDirectory(path, parent=parent) + """) + config = testdir.parseconfig(hello) + node = config.getnode(hello) + assert isinstance(node, py.test.collect.File) + assert node.name == "hello.xxx" + names = config._rootcol.totrail(node) + node = config._rootcol.getbynames(names) + assert isinstance(node, py.test.collect.File) + +class TestCollectFS: + def test_ignored_certain_directories(self, testdir): + tmpdir = testdir.tmpdir + tmpdir.ensure("_darcs", 'test_notfound.py') + tmpdir.ensure("CVS", 'test_notfound.py') + tmpdir.ensure("{arch}", 'test_notfound.py') + tmpdir.ensure(".whatever", 'test_notfound.py') + tmpdir.ensure(".bzr", 'test_notfound.py') + tmpdir.ensure("normal", 'test_found.py') + tmpdir.ensure('test_found.py') + + col = testdir.parseconfig(tmpdir).getnode(tmpdir) + items = col.collect() + names = [x.name for x in items] + assert len(items) == 2 + assert 'normal' in names + assert 'test_found.py' in names + + def test_found_certain_testfiles(self, testdir): + p1 = testdir.makepyfile(test_found = "pass", found_test="pass") + col = testdir.parseconfig(p1).getnode(p1.dirpath()) + items = col.collect() # Directory collect returns files sorted by name + assert len(items) == 2 + assert items[1].name == 'test_found.py' + assert items[0].name == 'found_test.py' + + def test_directory_file_sorting(self, testdir): + p1 = testdir.makepyfile(test_one="hello") + p1.dirpath().mkdir("x") + p1.dirpath().mkdir("dir1") + testdir.makepyfile(test_two="hello") + p1.dirpath().mkdir("dir2") + config = testdir.parseconfig() + col = config.getnode(p1.dirpath()) + names = [x.name for x in col.collect()] + assert names == ["dir1", "dir2", "test_one.py", "test_two.py", "x"] + +class TestCollectPluginHookRelay: + def test_pytest_collect_file(self, testdir): + tmpdir = testdir.tmpdir + wascalled = [] + class Plugin: + def 
pytest_collect_file(self, path, parent): + wascalled.append(path) + config = testdir.Config() + config.pluginmanager.register(Plugin()) + config.parse([tmpdir]) + col = config.getnode(tmpdir) + testdir.makefile(".abc", "xyz") + res = col.collect() + assert len(wascalled) == 1 + assert wascalled[0].ext == '.abc' + + def test_pytest_collect_directory(self, testdir): + tmpdir = testdir.tmpdir + wascalled = [] + class Plugin: + def pytest_collect_directory(self, path, parent): + wascalled.append(path.basename) + return parent.Directory(path, parent) + testdir.plugins.append(Plugin()) + testdir.mkdir("hello") + testdir.mkdir("world") + reprec = testdir.inline_run() + assert "hello" in wascalled + assert "world" in wascalled + # make sure the directories do not get double-appended + colreports = reprec.getreports("pytest_collectreport") + names = [rep.collector.name for rep in colreports] + assert names.count("hello") == 1 + +class TestPrunetraceback: + def test_collection_error(self, testdir): + p = testdir.makepyfile(""" + import not_exists + """) + result = testdir.runpytest(p) + assert "__import__" not in result.stdout.str(), "too long traceback" + result.stdout.fnmatch_lines([ + "*ERROR during collection*", + "*mport*not_exists*" + ]) + +class TestCustomConftests: + def test_collectignore_exclude_on_option(self, testdir): + testdir.makeconftest(""" + collect_ignore = ['hello', 'test_world.py'] + def pytest_addoption(parser): + parser.addoption("--XX", action="store_true", default=False) + def pytest_configure(config): + if config.getvalue("XX"): + collect_ignore[:] = [] + """) + testdir.mkdir("hello") + testdir.makepyfile(test_world="#") + reprec = testdir.inline_run(testdir.tmpdir) + names = [rep.collector.name for rep in reprec.getreports("pytest_collectreport")] + assert 'hello' not in names + assert 'test_world.py' not in names + reprec = testdir.inline_run(testdir.tmpdir, "--XX") + names = [rep.collector.name for rep in reprec.getreports("pytest_collectreport")] + assert 'hello' in names + assert 'test_world.py' in names + + def test_pytest_fs_collect_hooks_are_seen(self, testdir): + testdir.makeconftest(""" + import py + class MyDirectory(py.test.collect.Directory): + pass + class MyModule(py.test.collect.Module): + pass + def pytest_collect_directory(path, parent): + return MyDirectory(path, parent) + def pytest_collect_file(path, parent): + return MyModule(path, parent) + """) + testdir.makepyfile("def test_x(): pass") + result = testdir.runpytest("--collectonly") + result.stdout.fnmatch_lines([ + "*MyDirectory*", + "*MyModule*", + "*test_x*" + ]) + +class TestRootCol: + def test_totrail_and_back(self, testdir, tmpdir): + a = tmpdir.ensure("a", dir=1) + tmpdir.ensure("a", "__init__.py") + x = tmpdir.ensure("a", "trail.py") + config = testdir.reparseconfig([x]) + col = config.getnode(x) + trail = config._rootcol.totrail(col) + col2 = config._rootcol.fromtrail(trail) + assert col2 == col + + def test_totrail_topdir_and_beyond(self, testdir, tmpdir): + config = testdir.reparseconfig() + col = config.getnode(config.topdir) + trail = config._rootcol.totrail(col) + col2 = config._rootcol.fromtrail(trail) + assert col2.fspath == config.topdir + assert len(col2.listchain()) == 1 + py.test.raises(config.Error, "config.getnode(config.topdir.dirpath())") + #col3 = config.getnode(config.topdir.dirpath()) + #py.test.raises(ValueError, + # "col3._totrail()") + + def test_argid(self, testdir, tmpdir): + cfg = testdir.parseconfig() + p = testdir.makepyfile("def test_func(): pass") + item = 
cfg.getnode("%s::test_func" % p) + assert item.name == "test_func" + + def test_argid_with_method(self, testdir, tmpdir): + cfg = testdir.parseconfig() + p = testdir.makepyfile(""" + class TestClass: + def test_method(self): pass + """) + item = cfg.getnode("%s::TestClass::()::test_method" % p) + assert item.name == "test_method" + item = cfg.getnode("%s::TestClass::test_method" % p) + assert item.name == "test_method" --- a/testing/pytest/test_conftesthandle.py +++ /dev/null @@ -1,134 +0,0 @@ -import py -from py.impl.test.conftesthandle import Conftest - -def pytest_generate_tests(metafunc): - if "basedir" in metafunc.funcargnames: - metafunc.addcall(param="global") - metafunc.addcall(param="inpackage") - -def pytest_funcarg__basedir(request): - def basedirmaker(request): - basedir = d = request.getfuncargvalue("tmpdir") - d.ensure("adir/conftest.py").write("a=1 ; Directory = 3") - d.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5") - if request.param == "inpackage": - d.ensure("adir/__init__.py") - d.ensure("adir/b/__init__.py") - return d - return request.cached_setup(lambda: basedirmaker(request), extrakey=request.param) - -def ConftestWithSetinitial(path): - conftest = Conftest() - conftest.setinitial([path]) - return conftest - -class TestConftestValueAccessGlobal: - def test_basic_init(self, basedir): - conftest = Conftest() - conftest.setinitial([basedir.join("adir")]) - assert conftest.rget("a") == 1 - - def test_onimport(self, basedir): - l = [] - conftest = Conftest(onimport=l.append) - conftest.setinitial([basedir.join("adir")]) - assert len(l) == 1 - assert conftest.rget("a") == 1 - assert conftest.rget("b", basedir.join("adir", "b")) == 2 - assert len(l) == 2 - - def test_immediate_initialiation_and_incremental_are_the_same(self, basedir): - conftest = Conftest() - snap0 = len(conftest._path2confmods) - conftest.getconftestmodules(basedir) - snap1 = len(conftest._path2confmods) - #assert len(conftest._path2confmods) == snap1 + 1 - conftest.getconftestmodules(basedir.join('adir')) - assert len(conftest._path2confmods) == snap1 + 1 - conftest.getconftestmodules(basedir.join('b')) - assert len(conftest._path2confmods) == snap1 + 2 - - def test_default_has_lower_prio(self, basedir): - conftest = ConftestWithSetinitial(basedir.join("adir")) - assert conftest.rget('Directory') == 3 - #assert conftest.lget('Directory') == py.test.collect.Directory - - def test_value_access_not_existing(self, basedir): - conftest = ConftestWithSetinitial(basedir) - py.test.raises(KeyError, "conftest.rget('a')") - #py.test.raises(KeyError, "conftest.lget('a')") - - def test_value_access_by_path(self, basedir): - conftest = ConftestWithSetinitial(basedir) - assert conftest.rget("a", basedir.join('adir')) == 1 - #assert conftest.lget("a", basedir.join('adir')) == 1 - assert conftest.rget("a", basedir.join('adir', 'b')) == 1.5 - #assert conftest.lget("a", basedir.join('adir', 'b')) == 1 - #assert conftest.lget("b", basedir.join('adir', 'b')) == 2 - #assert py.test.raises(KeyError, - # 'conftest.lget("b", basedir.join("a"))' - #) - - def test_value_access_with_init_one_conftest(self, basedir): - conftest = ConftestWithSetinitial(basedir.join('adir')) - assert conftest.rget("a") == 1 - #assert conftest.lget("a") == 1 - - def test_value_access_with_init_two_conftests(self, basedir): - conftest = ConftestWithSetinitial(basedir.join("adir", "b")) - conftest.rget("a") == 1.5 - #conftest.lget("a") == 1 - #conftest.lget("b") == 1 - - def test_value_access_with_confmod(self, basedir): - topdir = 
basedir.join("adir", "b") - topdir.ensure("xx", dir=True) - conftest = ConftestWithSetinitial(topdir) - mod, value = conftest.rget_with_confmod("a", topdir) - assert value == 1.5 - path = py.path.local(mod.__file__) - assert path.dirpath() == basedir.join("adir", "b") - assert path.purebasename == "conftest" - -def test_conftestcutdir(testdir): - conf = testdir.makeconftest("") - p = testdir.mkdir("x") - conftest = Conftest(confcutdir=p) - conftest.setinitial([testdir.tmpdir]) - l = conftest.getconftestmodules(p) - assert len(l) == 0 - l = conftest.getconftestmodules(conf.dirpath()) - assert len(l) == 0 - assert conf not in conftest._conftestpath2mod - # but we can still import a conftest directly - conftest.importconftest(conf) - l = conftest.getconftestmodules(conf.dirpath()) - assert l[0].__file__.startswith(str(conf)) - # and all sub paths get updated properly - l = conftest.getconftestmodules(p) - assert len(l) == 1 - assert l[0].__file__.startswith(str(conf)) - -def test_conftestcutdir_inplace_considered(testdir): - conf = testdir.makeconftest("") - conftest = Conftest(confcutdir=conf.dirpath()) - conftest.setinitial([conf.dirpath()]) - l = conftest.getconftestmodules(conf.dirpath()) - assert len(l) == 1 - assert l[0].__file__.startswith(str(conf)) - -def test_setinitial_confcut(testdir): - conf = testdir.makeconftest("") - sub = testdir.mkdir("sub") - sub.chdir() - for opts in (["--confcutdir=%s" % sub, sub], - [sub, "--confcutdir=%s" % sub], - ["--confcutdir=.", sub], - [sub, "--confcutdir", sub], - [str(sub), "--confcutdir", "."], - ): - conftest = Conftest() - conftest.setinitial(opts) - assert conftest._confcutdir == sub - assert conftest.getconftestmodules(sub) == [] - assert conftest.getconftestmodules(conf.dirpath()) == [] --- /dev/null +++ b/testing/test_genitems.py @@ -0,0 +1,136 @@ +import py + +class Test_genitems: + def test_check_collect_hashes(self, testdir): + p = testdir.makepyfile(""" + def test_1(): + pass + + def test_2(): + pass + """) + p.copy(p.dirpath(p.purebasename + "2" + ".py")) + items, reprec = testdir.inline_genitems(p.dirpath()) + assert len(items) == 4 + for numi, i in enumerate(items): + for numj, j in enumerate(items): + if numj != numi: + assert hash(i) != hash(j) + assert i != j + + def test_root_conftest_syntax_error(self, testdir): + # do we want to unify behaviour with + # test_subdir_conftest_error? 
+ p = testdir.makepyfile(conftest="raise SyntaxError\n") + py.test.raises(SyntaxError, testdir.inline_genitems, p.dirpath()) + + def test_subdir_conftest_error(self, testdir): + tmp = testdir.tmpdir + tmp.ensure("sub", "conftest.py").write("raise SyntaxError('x')\n") + items, reprec = testdir.inline_genitems(tmp) + collectionfailures = reprec.getfailedcollections() + assert len(collectionfailures) == 1 + ev = collectionfailures[0] + assert "SyntaxError: x" in ev.longrepr.reprcrash.message + + def test_example_items1(self, testdir): + p = testdir.makepyfile(''' + def testone(): + pass + + class TestX: + def testmethod_one(self): + pass + + class TestY(TestX): + pass + ''') + items, reprec = testdir.inline_genitems(p) + assert len(items) == 3 + assert items[0].name == 'testone' + assert items[1].name == 'testmethod_one' + assert items[2].name == 'testmethod_one' + + # let's also test getmodpath here + assert items[0].getmodpath() == "testone" + assert items[1].getmodpath() == "TestX.testmethod_one" + assert items[2].getmodpath() == "TestY.testmethod_one" + + s = items[0].getmodpath(stopatmodule=False) + assert s.endswith("test_example_items1.testone") + print(s) + + +class TestKeywordSelection: + def test_select_simple(self, testdir): + file_test = testdir.makepyfile(""" + def test_one(): assert 0 + class TestClass(object): + def test_method_one(self): + assert 42 == 43 + """) + def check(keyword, name): + reprec = testdir.inline_run("-s", "-k", keyword, file_test) + passed, skipped, failed = reprec.listoutcomes() + assert len(failed) == 1 + assert failed[0].item.name == name + assert len(reprec.getcalls('pytest_deselected')) == 1 + + for keyword in ['test_one', 'est_on']: + #yield check, keyword, 'test_one' + check(keyword, 'test_one') + check('TestClass.test', 'test_method_one') + + def test_select_extra_keywords(self, testdir): + p = testdir.makepyfile(test_select=""" + def test_1(): + pass + class TestClass: + def test_2(self): + pass + """) + testdir.makepyfile(conftest=""" + import py + class Class(py.test.collect.Class): + def _keywords(self): + return ['xxx', self.name] + """) + for keyword in ('xxx', 'xxx test_2', 'TestClass', 'xxx -test_1', + 'TestClass test_2', 'xxx TestClass test_2',): + reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword) + py.builtin.print_("keyword", repr(keyword)) + passed, skipped, failed = reprec.listoutcomes() + assert len(passed) == 1 + assert passed[0].item.name == "test_2" + dlist = reprec.getcalls("pytest_deselected") + assert len(dlist) == 1 + assert dlist[0].items[0].name == 'test_1' + + def test_select_starton(self, testdir): + threepass = testdir.makepyfile(test_threepass=""" + def test_one(): assert 1 + def test_two(): assert 1 + def test_three(): assert 1 + """) + reprec = testdir.inline_run("-k", "test_two:", threepass) + passed, skipped, failed = reprec.listoutcomes() + assert len(passed) == 2 + assert not failed + dlist = reprec.getcalls("pytest_deselected") + assert len(dlist) == 1 + item = dlist[0].items[0] + assert item.name == "test_one" + + + def test_keyword_extra(self, testdir): + p = testdir.makepyfile(""" + def test_one(): + assert 0 + test_one.mykeyword = True + """) + reprec = testdir.inline_run("-k", "-mykeyword", p) + passed, skipped, failed = reprec.countoutcomes() + assert passed + skipped + failed == 0 + reprec = testdir.inline_run("-k", "mykeyword", p) + passed, skipped, failed = reprec.countoutcomes() + assert failed == 1 --- /dev/null +++ b/testing/test_deprecated_api.py @@ -0,0 +1,344 @@ + +import py +from 
py.impl.test.outcome import Skipped + +class TestCollectDeprecated: + + def test_collect_with_deprecated_run_and_join(self, testdir, recwarn): + testdir.makeconftest(""" + import py + + class MyInstance(py.test.collect.Instance): + def run(self): + return ['check2'] + def join(self, name): + if name == 'check2': + return self.Function(name=name, parent=self) + + class MyClass(py.test.collect.Class): + def run(self): + return ['check2'] + def join(self, name): + return MyInstance(name='i', parent=self) + + class MyModule(py.test.collect.Module): + def run(self): + return ['check', 'Cls'] + def join(self, name): + if name == 'check': + return self.Function(name, parent=self) + if name == 'Cls': + return MyClass(name, parent=self) + + class MyDirectory(py.test.collect.Directory): + Module = MyModule + def run(self): + return ['somefile.py'] + def join(self, name): + if name == "somefile.py": + return self.Module(self.fspath.join(name), parent=self) + + def pytest_collect_directory(path, parent): + if path.basename == "subconf": + return MyDirectory(path, parent) + """) + subconf = testdir.mkpydir("subconf") + somefile = subconf.join("somefile.py") + somefile.write(py.code.Source(""" + def check(): pass + class Cls: + def check2(self): pass + """)) + config = testdir.parseconfig(somefile) + dirnode = config.getnode(somefile.dirpath()) + colitems = dirnode.collect() + w = recwarn.pop(DeprecationWarning) + assert w.filename.find("conftest.py") != -1 + #recwarn.resetregistry() + #assert 0, (w.message, w.filename, w.lineno) + assert len(colitems) == 1 + modcol = colitems[0] + assert modcol.name == "somefile.py" + colitems = modcol.collect() + recwarn.pop(DeprecationWarning) + assert len(colitems) == 2 + assert colitems[0].name == 'check' + assert colitems[1].name == 'Cls' + clscol = colitems[1] + + colitems = clscol.collect() + recwarn.pop(DeprecationWarning) + assert len(colitems) == 1 + icol = colitems[0] + colitems = icol.collect() + recwarn.pop(DeprecationWarning) + assert len(colitems) == 1 + assert colitems[0].name == 'check2' + + def test_collect_with_deprecated_join_but_no_run(self, testdir, recwarn): + testdir.makepyfile(conftest=""" + import py + + class Module(py.test.collect.Module): + def funcnamefilter(self, name): + if name.startswith("check_"): + return True + return super(Module, self).funcnamefilter(name) + def join(self, name): + if name.startswith("check_"): + return self.Function(name, parent=self) + assert name != "SomeClass", "join should not be called with this name" + """) + col = testdir.getmodulecol(""" + def somefunc(): pass + def check_one(): pass + class SomeClass: pass + """) + colitems = col.collect() + recwarn.pop(DeprecationWarning) + assert len(colitems) == 1 + funcitem = colitems[0] + assert funcitem.name == "check_one" + + def test_function_custom_run(self, testdir, recwarn): + testdir.makepyfile(conftest=""" + import py + class Function(py.test.collect.Function): + def run(self): + pass + """) + modcol = testdir.getmodulecol("def test_func(): pass") + funcitem = modcol.collect()[0] + assert funcitem.name == 'test_func' + recwarn.clear() + funcitem._deprecated_testexecution() + recwarn.pop(DeprecationWarning) + + def test_function_custom_execute(self, testdir, recwarn): + testdir.makepyfile(conftest=""" + import py + + class MyFunction(py.test.collect.Function): + def execute(self, obj, *args): + pass + Function=MyFunction + """) + modcol = testdir.getmodulecol("def test_func2(): pass") + funcitem = modcol.collect()[0] + w = recwarn.pop(DeprecationWarning) # 
for defining conftest.Function + assert funcitem.name == 'test_func2' + funcitem._deprecated_testexecution() + w = recwarn.pop(DeprecationWarning) + assert w.filename.find("conftest.py") != -1 + + def test_function_deprecated_run_execute(self, testdir, recwarn): + testdir.makepyfile(conftest=""" + import py + + class Function(py.test.collect.Function): + + def run(self): + pass + """) + modcol = testdir.getmodulecol("def test_some2(): pass") + funcitem = modcol.collect()[0] + w = recwarn.pop(DeprecationWarning) + assert "conftest.py" in str(w.message) + + recwarn.clear() + funcitem._deprecated_testexecution() + recwarn.pop(DeprecationWarning) + + def test_function_deprecated_run_recursive(self, testdir): + testdir.makepyfile(conftest=""" + import py + class Module(py.test.collect.Module): + def run(self): + return super(Module, self).run() + """) + modcol = testdir.getmodulecol("def test_some(): pass") + colitems = py.test.deprecated_call(modcol.collect) + funcitem = colitems[0] + + def test_conftest_subclasses_Module_with_non_pyfile(self, testdir): + testdir.makepyfile(conftest=""" + import py + class Module(py.test.collect.Module): + def run(self): + return [] + class Directory(py.test.collect.Directory): + def consider_file(self, path): + if path.basename == "testme.xxx": + return Module(path, parent=self) + return super(Directory, self).consider_file(path) + """) + testme = testdir.makefile('xxx', testme="hello") + config = testdir.parseconfig(testme) + col = config.getnode(testme) + assert col.collect() == [] + + + +class TestDisabled: + def test_disabled_module(self, recwarn, testdir): + modcol = testdir.getmodulecol(""" + disabled = True + def setup_module(mod): + raise ValueError + def test_method(): + pass + """) + l = modcol.collect() + assert len(l) == 1 + recwarn.clear() + py.test.raises(Skipped, "modcol.setup()") + recwarn.pop(DeprecationWarning) + + def test_disabled_class(self, recwarn, testdir): + modcol = testdir.getmodulecol(""" + class TestClass: + disabled = True + def test_method(self): + pass + """) + l = modcol.collect() + assert len(l) == 1 + modcol = l[0] + assert isinstance(modcol, py.test.collect.Class) + l = modcol.collect() + assert len(l) == 1 + recwarn.clear() + py.test.raises(Skipped, "modcol.setup()") + recwarn.pop(DeprecationWarning) + + def test_disabled_class_functional(self, testdir): + reprec = testdir.inline_runsource(""" + class TestSimpleClassSetup: + disabled = True + def test_classlevel(self): pass + def test_classlevel2(self): pass + """) + reprec.assertoutcome(skipped=2) + + @py.test.mark.multi(name="Directory Module Class Function".split()) + def test_function_deprecated_run_execute(self, name, testdir, recwarn): + testdir.makeconftest(""" + import py + class %s(py.test.collect.%s): + pass + """ % (name, name)) + p = testdir.makepyfile(""" + class TestClass: + def test_method(self): + pass + def test_function(): + pass + """) + config = testdir.parseconfig() + if name == "Directory": + config.getnode(testdir.tmpdir) + elif name in ("Module", "File"): + config.getnode(p) + else: + fnode = config.getnode(p) + recwarn.clear() + fnode.collect() + w = recwarn.pop(DeprecationWarning) + assert "conftest.py" in str(w.message) + +def test_config_cmdline_options(recwarn, testdir): + testdir.makepyfile(conftest=""" + import py + def _callback(option, opt_str, value, parser, *args, **kwargs): + option.tdest = True + Option = py.test.config.Option + option = py.test.config.addoptions("testing group", + Option('-G', '--glong', action="store", default=42, + 
type="int", dest="gdest", help="g value."), + # XXX note: special case, option without a destination + Option('-T', '--tlong', action="callback", callback=_callback, + help='t value'), + ) + """) + recwarn.clear() + config = testdir.reparseconfig(['-G', '17']) + recwarn.pop(DeprecationWarning) + assert config.option.gdest == 17 + +def test_conftest_non_python_items(recwarn, testdir): + testdir.makepyfile(conftest=""" + import py + class CustomItem(py.test.collect.Item): + def run(self): + pass + class Directory(py.test.collect.Directory): + def consider_file(self, fspath): + if fspath.ext == ".xxx": + return CustomItem(fspath.basename, parent=self) + """) + checkfile = testdir.makefile(ext="xxx", hello="world") + testdir.makepyfile(x="") + testdir.maketxtfile(x="") + config = testdir.parseconfig() + recwarn.clear() + dircol = config.getnode(checkfile.dirpath()) + w = recwarn.pop(DeprecationWarning) + assert str(w.message).find("conftest.py") != -1 + colitems = dircol.collect() + assert len(colitems) == 1 + assert colitems[0].name == "hello.xxx" + assert colitems[0].__class__.__name__ == "CustomItem" + + item = config.getnode(checkfile) + assert item.name == "hello.xxx" + assert item.__class__.__name__ == "CustomItem" + +def test_extra_python_files_and_functions(testdir): + testdir.makepyfile(conftest=""" + import py + class MyFunction(py.test.collect.Function): + pass + class Directory(py.test.collect.Directory): + def consider_file(self, path): + if path.check(fnmatch="check_*.py"): + return self.Module(path, parent=self) + return super(Directory, self).consider_file(path) + class myfuncmixin: + Function = MyFunction + def funcnamefilter(self, name): + return name.startswith('check_') + class Module(myfuncmixin, py.test.collect.Module): + def classnamefilter(self, name): + return name.startswith('CustomTestClass') + class Instance(myfuncmixin, py.test.collect.Instance): + pass + """) + checkfile = testdir.makepyfile(check_file=""" + def check_func(): + assert 42 == 42 + class CustomTestClass: + def check_method(self): + assert 23 == 23 + """) + # check that directory collects "check_" files + config = testdir.parseconfig() + col = config.getnode(checkfile.dirpath()) + colitems = col.collect() + assert len(colitems) == 1 + assert isinstance(colitems[0], py.test.collect.Module) + + # check that module collects "check_" functions and methods + config = testdir.parseconfig(checkfile) + col = config.getnode(checkfile) + assert isinstance(col, py.test.collect.Module) + colitems = col.collect() + assert len(colitems) == 2 + funccol = colitems[0] + assert isinstance(funccol, py.test.collect.Function) + assert funccol.name == "check_func" + clscol = colitems[1] + assert isinstance(clscol, py.test.collect.Class) + colitems = clscol.collect()[0].collect() + assert len(colitems) == 1 + assert colitems[0].name == "check_method" + --- a/testing/pytest/test_genitems.py +++ /dev/null @@ -1,136 +0,0 @@ -import py - -class Test_genitems: - def test_check_collect_hashes(self, testdir): - p = testdir.makepyfile(""" - def test_1(): - pass - - def test_2(): - pass - """) - p.copy(p.dirpath(p.purebasename + "2" + ".py")) - items, reprec = testdir.inline_genitems(p.dirpath()) - assert len(items) == 4 - for numi, i in enumerate(items): - for numj, j in enumerate(items): - if numj != numi: - assert hash(i) != hash(j) - assert i != j - - def test_root_conftest_syntax_error(self, testdir): - # do we want to unify behaviour with - # test_subdir_conftest_error? 
- p = testdir.makepyfile(conftest="raise SyntaxError\n") - py.test.raises(SyntaxError, testdir.inline_genitems, p.dirpath()) - - def test_subdir_conftest_error(self, testdir): - tmp = testdir.tmpdir - tmp.ensure("sub", "conftest.py").write("raise SyntaxError('x')\n") - items, reprec = testdir.inline_genitems(tmp) - collectionfailures = reprec.getfailedcollections() - assert len(collectionfailures) == 1 - ev = collectionfailures[0] - assert "SyntaxError: x" in ev.longrepr.reprcrash.message - - def test_example_items1(self, testdir): - p = testdir.makepyfile(''' - def testone(): - pass - - class TestX: - def testmethod_one(self): - pass - - class TestY(TestX): - pass - ''') - items, reprec = testdir.inline_genitems(p) - assert len(items) == 3 - assert items[0].name == 'testone' - assert items[1].name == 'testmethod_one' - assert items[2].name == 'testmethod_one' - - # let's also test getmodpath here - assert items[0].getmodpath() == "testone" - assert items[1].getmodpath() == "TestX.testmethod_one" - assert items[2].getmodpath() == "TestY.testmethod_one" - - s = items[0].getmodpath(stopatmodule=False) - assert s.endswith("test_example_items1.testone") - print(s) - - -class TestKeywordSelection: - def test_select_simple(self, testdir): - file_test = testdir.makepyfile(""" - def test_one(): assert 0 - class TestClass(object): - def test_method_one(self): - assert 42 == 43 - """) - def check(keyword, name): - reprec = testdir.inline_run("-s", "-k", keyword, file_test) - passed, skipped, failed = reprec.listoutcomes() - assert len(failed) == 1 - assert failed[0].item.name == name - assert len(reprec.getcalls('pytest_deselected')) == 1 - - for keyword in ['test_one', 'est_on']: - #yield check, keyword, 'test_one' - check(keyword, 'test_one') - check('TestClass.test', 'test_method_one') - - def test_select_extra_keywords(self, testdir): - p = testdir.makepyfile(test_select=""" - def test_1(): - pass - class TestClass: - def test_2(self): - pass - """) - testdir.makepyfile(conftest=""" - import py - class Class(py.test.collect.Class): - def _keywords(self): - return ['xxx', self.name] - """) - for keyword in ('xxx', 'xxx test_2', 'TestClass', 'xxx -test_1', - 'TestClass test_2', 'xxx TestClass test_2',): - reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword) - py.builtin.print_("keyword", repr(keyword)) - passed, skipped, failed = reprec.listoutcomes() - assert len(passed) == 1 - assert passed[0].item.name == "test_2" - dlist = reprec.getcalls("pytest_deselected") - assert len(dlist) == 1 - assert dlist[0].items[0].name == 'test_1' - - def test_select_starton(self, testdir): - threepass = testdir.makepyfile(test_threepass=""" - def test_one(): assert 1 - def test_two(): assert 1 - def test_three(): assert 1 - """) - reprec = testdir.inline_run("-k", "test_two:", threepass) - passed, skipped, failed = reprec.listoutcomes() - assert len(passed) == 2 - assert not failed - dlist = reprec.getcalls("pytest_deselected") - assert len(dlist) == 1 - item = dlist[0].items[0] - assert item.name == "test_one" - - - def test_keyword_extra(self, testdir): - p = testdir.makepyfile(""" - def test_one(): - assert 0 - test_one.mykeyword = True - """) - reprec = testdir.inline_run("-k", "-mykeyword", p) - passed, skipped, failed = reprec.countoutcomes() - assert passed + skipped + failed == 0 - reprec = testdir.inline_run("-k", "mykeyword", p) - passed, skipped, failed = reprec.countoutcomes() - assert failed == 1 --- a/testing/pytest/test_collect.py +++ /dev/null @@ -1,238 +0,0 @@ -import py - -class 
TestCollector: - def test_collect_versus_item(self): - from py.impl.test.collect import Collector, Item - assert not issubclass(Collector, Item) - assert not issubclass(Item, Collector) - - def test_check_equality(self, testdir): - modcol = testdir.getmodulecol(""" - def test_pass(): pass - def test_fail(): assert 0 - """) - fn1 = modcol.collect_by_name("test_pass") - assert isinstance(fn1, py.test.collect.Function) - fn2 = modcol.collect_by_name("test_pass") - assert isinstance(fn2, py.test.collect.Function) - - assert fn1 == fn2 - assert fn1 != modcol - if py.std.sys.version_info < (3, 0): - assert cmp(fn1, fn2) == 0 - assert hash(fn1) == hash(fn2) - - fn3 = modcol.collect_by_name("test_fail") - assert isinstance(fn3, py.test.collect.Function) - assert not (fn1 == fn3) - assert fn1 != fn3 - - for fn in fn1,fn2,fn3: - assert fn != 3 - assert fn != modcol - assert fn != [1,2,3] - assert [1,2,3] != fn - assert modcol != fn - - def test_getparent(self, testdir): - modcol = testdir.getmodulecol(""" - class TestClass: - def test_foo(): - pass - """) - cls = modcol.collect_by_name("TestClass") - fn = cls.collect_by_name("()").collect_by_name("test_foo") - - parent = fn.getparent(py.test.collect.Module) - assert parent is modcol - - parent = fn.getparent(py.test.collect.Function) - assert parent is fn - - parent = fn.getparent(py.test.collect.Class) - assert parent is cls - - - def test_getcustomfile_roundtrip(self, testdir): - hello = testdir.makefile(".xxx", hello="world") - testdir.makepyfile(conftest=""" - import py - class CustomFile(py.test.collect.File): - pass - class MyDirectory(py.test.collect.Directory): - def collect(self): - return [CustomFile(self.fspath.join("hello.xxx"), parent=self)] - def pytest_collect_directory(path, parent): - return MyDirectory(path, parent=parent) - """) - config = testdir.parseconfig(hello) - node = config.getnode(hello) - assert isinstance(node, py.test.collect.File) - assert node.name == "hello.xxx" - names = config._rootcol.totrail(node) - node = config._rootcol.getbynames(names) - assert isinstance(node, py.test.collect.File) - -class TestCollectFS: - def test_ignored_certain_directories(self, testdir): - tmpdir = testdir.tmpdir - tmpdir.ensure("_darcs", 'test_notfound.py') - tmpdir.ensure("CVS", 'test_notfound.py') - tmpdir.ensure("{arch}", 'test_notfound.py') - tmpdir.ensure(".whatever", 'test_notfound.py') - tmpdir.ensure(".bzr", 'test_notfound.py') - tmpdir.ensure("normal", 'test_found.py') - tmpdir.ensure('test_found.py') - - col = testdir.parseconfig(tmpdir).getnode(tmpdir) - items = col.collect() - names = [x.name for x in items] - assert len(items) == 2 - assert 'normal' in names - assert 'test_found.py' in names - - def test_found_certain_testfiles(self, testdir): - p1 = testdir.makepyfile(test_found = "pass", found_test="pass") - col = testdir.parseconfig(p1).getnode(p1.dirpath()) - items = col.collect() # Directory collect returns files sorted by name - assert len(items) == 2 - assert items[1].name == 'test_found.py' - assert items[0].name == 'found_test.py' - - def test_directory_file_sorting(self, testdir): - p1 = testdir.makepyfile(test_one="hello") - p1.dirpath().mkdir("x") - p1.dirpath().mkdir("dir1") - testdir.makepyfile(test_two="hello") - p1.dirpath().mkdir("dir2") - config = testdir.parseconfig() - col = config.getnode(p1.dirpath()) - names = [x.name for x in col.collect()] - assert names == ["dir1", "dir2", "test_one.py", "test_two.py", "x"] - -class TestCollectPluginHookRelay: - def test_pytest_collect_file(self, testdir): - 
tmpdir = testdir.tmpdir - wascalled = [] - class Plugin: - def pytest_collect_file(self, path, parent): - wascalled.append(path) - config = testdir.Config() - config.pluginmanager.register(Plugin()) - config.parse([tmpdir]) - col = config.getnode(tmpdir) - testdir.makefile(".abc", "xyz") - res = col.collect() - assert len(wascalled) == 1 - assert wascalled[0].ext == '.abc' - - def test_pytest_collect_directory(self, testdir): - tmpdir = testdir.tmpdir - wascalled = [] - class Plugin: - def pytest_collect_directory(self, path, parent): - wascalled.append(path.basename) - return parent.Directory(path, parent) - testdir.plugins.append(Plugin()) - testdir.mkdir("hello") - testdir.mkdir("world") - reprec = testdir.inline_run() - assert "hello" in wascalled - assert "world" in wascalled - # make sure the directories do not get double-appended - colreports = reprec.getreports("pytest_collectreport") - names = [rep.collector.name for rep in colreports] - assert names.count("hello") == 1 - -class TestPrunetraceback: - def test_collection_error(self, testdir): - p = testdir.makepyfile(""" - import not_exists - """) - result = testdir.runpytest(p) - assert "__import__" not in result.stdout.str(), "too long traceback" - result.stdout.fnmatch_lines([ - "*ERROR during collection*", - "*mport*not_exists*" - ]) - -class TestCustomConftests: - def test_collectignore_exclude_on_option(self, testdir): - testdir.makeconftest(""" - collect_ignore = ['hello', 'test_world.py'] - def pytest_addoption(parser): - parser.addoption("--XX", action="store_true", default=False) - def pytest_configure(config): - if config.getvalue("XX"): - collect_ignore[:] = [] - """) - testdir.mkdir("hello") - testdir.makepyfile(test_world="#") - reprec = testdir.inline_run(testdir.tmpdir) - names = [rep.collector.name for rep in reprec.getreports("pytest_collectreport")] - assert 'hello' not in names - assert 'test_world.py' not in names - reprec = testdir.inline_run(testdir.tmpdir, "--XX") - names = [rep.collector.name for rep in reprec.getreports("pytest_collectreport")] - assert 'hello' in names - assert 'test_world.py' in names - - def test_pytest_fs_collect_hooks_are_seen(self, testdir): - testdir.makeconftest(""" - import py - class MyDirectory(py.test.collect.Directory): - pass - class MyModule(py.test.collect.Module): - pass - def pytest_collect_directory(path, parent): - return MyDirectory(path, parent) - def pytest_collect_file(path, parent): - return MyModule(path, parent) - """) - testdir.makepyfile("def test_x(): pass") - result = testdir.runpytest("--collectonly") - result.stdout.fnmatch_lines([ - "*MyDirectory*", - "*MyModule*", - "*test_x*" - ]) - -class TestRootCol: - def test_totrail_and_back(self, testdir, tmpdir): - a = tmpdir.ensure("a", dir=1) - tmpdir.ensure("a", "__init__.py") - x = tmpdir.ensure("a", "trail.py") - config = testdir.reparseconfig([x]) - col = config.getnode(x) - trail = config._rootcol.totrail(col) - col2 = config._rootcol.fromtrail(trail) - assert col2 == col - - def test_totrail_topdir_and_beyond(self, testdir, tmpdir): - config = testdir.reparseconfig() - col = config.getnode(config.topdir) - trail = config._rootcol.totrail(col) - col2 = config._rootcol.fromtrail(trail) - assert col2.fspath == config.topdir - assert len(col2.listchain()) == 1 - py.test.raises(config.Error, "config.getnode(config.topdir.dirpath())") - #col3 = config.getnode(config.topdir.dirpath()) - #py.test.raises(ValueError, - # "col3._totrail()") - - def test_argid(self, testdir, tmpdir): - cfg = testdir.parseconfig() - p 
= testdir.makepyfile("def test_func(): pass") - item = cfg.getnode("%s::test_func" % p) - assert item.name == "test_func" - - def test_argid_with_method(self, testdir, tmpdir): - cfg = testdir.parseconfig() - p = testdir.makepyfile(""" - class TestClass: - def test_method(self): pass - """) - item = cfg.getnode("%s::TestClass::()::test_method" % p) - assert item.name == "test_method" - item = cfg.getnode("%s::TestClass::test_method" % p) - assert item.name == "test_method" --- /dev/null +++ b/testing/test_funcargs.py @@ -0,0 +1,544 @@ +import py, sys +from py.impl.test import funcargs + +def test_getfuncargnames(): + def f(): pass + assert not funcargs.getfuncargnames(f) + def g(arg): pass + assert funcargs.getfuncargnames(g) == ['arg'] + def h(arg1, arg2="hello"): pass + assert funcargs.getfuncargnames(h) == ['arg1'] + def h(arg1, arg2, arg3="hello"): pass + assert funcargs.getfuncargnames(h) == ['arg1', 'arg2'] + class A: + def f(self, arg1, arg2="hello"): + pass + assert funcargs.getfuncargnames(A().f) == ['arg1'] + if sys.version_info < (3,0): + assert funcargs.getfuncargnames(A.f) == ['arg1'] + +def test_callspec_repr(): + cs = funcargs.CallSpec({}, 'hello', 1) + repr(cs) + cs = funcargs.CallSpec({}, 'hello', funcargs._notexists) + repr(cs) + +class TestFillFuncArgs: + def test_funcarg_lookupfails(self, testdir): + testdir.makeconftest(""" + def pytest_funcarg__xyzsomething(request): + return 42 + """) + item = testdir.getitem("def test_func(some): pass") + exc = py.test.raises(LookupError, "funcargs.fillfuncargs(item)") + s = str(exc.value) + assert s.find("xyzsomething") != -1 + + def test_funcarg_lookup_default(self, testdir): + item = testdir.getitem("def test_func(some, other=42): pass") + class Provider: + def pytest_funcarg__some(self, request): + return request.function.__name__ + item.config.pluginmanager.register(Provider()) + funcargs.fillfuncargs(item) + assert len(item.funcargs) == 1 + + def test_funcarg_basic(self, testdir): + item = testdir.getitem("def test_func(some, other): pass") + class Provider: + def pytest_funcarg__some(self, request): + return request.function.__name__ + def pytest_funcarg__other(self, request): + return 42 + item.config.pluginmanager.register(Provider()) + funcargs.fillfuncargs(item) + assert len(item.funcargs) == 2 + assert item.funcargs['some'] == "test_func" + assert item.funcargs['other'] == 42 + + def test_funcarg_lookup_modulelevel(self, testdir): + modcol = testdir.getmodulecol(""" + def pytest_funcarg__something(request): + return request.function.__name__ + + class TestClass: + def test_method(self, something): + pass + def test_func(something): + pass + """) + item1, item2 = testdir.genitems([modcol]) + funcargs.fillfuncargs(item1) + assert item1.funcargs['something'] == "test_method" + funcargs.fillfuncargs(item2) + assert item2.funcargs['something'] == "test_func" + + def test_funcarg_lookup_classlevel(self, testdir): + p = testdir.makepyfile(""" + class TestClass: + def pytest_funcarg__something(self, request): + return request.instance + def test_method(self, something): + assert something is self + """) + result = testdir.runpytest(p) + assert result.stdout.fnmatch_lines([ + "*1 passed*" + ]) + + def test_fillfuncargs_exposed(self, testdir): + item = testdir.getitem("def test_func(some, other=42): pass") + class Provider: + def pytest_funcarg__some(self, request): + return request.function.__name__ + item.config.pluginmanager.register(Provider()) + if hasattr(item, '_args'): + del item._args + py.test.collect._fillfuncargs(item) 
+ assert len(item.funcargs) == 1 + +class TestRequest: + def test_request_attributes(self, testdir): + item = testdir.getitem(""" + def pytest_funcarg__something(request): pass + def test_func(something): pass + """) + req = funcargs.FuncargRequest(item) + assert req.function == item.obj + assert hasattr(req.module, 'test_func') + assert req.cls is None + assert req.function.__name__ == "test_func" + assert req.config == item.config + assert repr(req).find(req.function.__name__) != -1 + + def test_request_attributes_method(self, testdir): + item, = testdir.getitems(""" + class TestB: + def test_func(self, something): + pass + """) + req = funcargs.FuncargRequest(item) + assert req.cls.__name__ == "TestB" + assert req.instance.__class__ == req.cls + + def XXXtest_request_contains_funcarg_name2factory(self, testdir): + modcol = testdir.getmodulecol(""" + def pytest_funcarg__something(request): + pass + class TestClass: + def test_method(self, something): + pass + """) + item1, = testdir.genitems([modcol]) + assert item1.name == "test_method" + name2factory = funcargs.FuncargRequest(item1)._name2factory + assert len(name2factory) == 1 + assert name2factory[0].__name__ == "pytest_funcarg__something" + + def test_getfuncargvalue_recursive(self, testdir): + testdir.makeconftest(""" + def pytest_funcarg__something(request): + return 1 + """) + item = testdir.getitem(""" + def pytest_funcarg__something(request): + return request.getfuncargvalue("something") + 1 + def test_func(something): + assert something == 2 + """) + req = funcargs.FuncargRequest(item) + val = req.getfuncargvalue("something") + assert val == 2 + + def test_getfuncargvalue(self, testdir): + item = testdir.getitem(""" + l = [2] + def pytest_funcarg__something(request): return 1 + def pytest_funcarg__other(request): + return l.pop() + def test_func(something): pass + """) + req = funcargs.FuncargRequest(item) + py.test.raises(req.Error, req.getfuncargvalue, "notexists") + val = req.getfuncargvalue("something") + assert val == 1 + val = req.getfuncargvalue("something") + assert val == 1 + val2 = req.getfuncargvalue("other") + assert val2 == 2 + val2 = req.getfuncargvalue("other") # see about caching + assert val2 == 2 + req._fillfuncargs() + assert item.funcargs == {'something': 1} + + def test_request_addfinalizer(self, testdir): + item = testdir.getitem(""" + teardownlist = [] + def pytest_funcarg__something(request): + request.addfinalizer(lambda: teardownlist.append(1)) + def test_func(something): pass + """) + req = funcargs.FuncargRequest(item) + req.config._setupstate.prepare(item) # XXX + req._fillfuncargs() + # successively check finalization calls + teardownlist = item.getparent(py.test.collect.Module).obj.teardownlist + ss = item.config._setupstate + assert not teardownlist + ss.teardown_exact(item) + print(ss.stack) + assert teardownlist == [1] + + def test_request_addfinalizer_partial_setup_failure(self, testdir): + p = testdir.makepyfile(""" + l = [] + def pytest_funcarg__something(request): + request.addfinalizer(lambda: l.append(None)) + def test_func(something, missingarg): + pass + def test_second(): + assert len(l) == 1 + """) + result = testdir.runpytest(p) + assert result.stdout.fnmatch_lines([ + "*1 passed*1 error*" + ]) + + def test_request_getmodulepath(self, testdir): + modcol = testdir.getmodulecol("def test_somefunc(): pass") + item, = testdir.genitems([modcol]) + req = funcargs.FuncargRequest(item) + assert req.fspath == modcol.fspath + +class TestRequestCachedSetup: + def test_request_cachedsetup(self, 
testdir): + item1,item2 = testdir.getitems(""" + class TestClass: + def test_func1(self, something): + pass + def test_func2(self, something): + pass + """) + req1 = funcargs.FuncargRequest(item1) + l = ["hello"] + def setup(): + return l.pop() + ret1 = req1.cached_setup(setup) + assert ret1 == "hello" + ret1b = req1.cached_setup(setup) + assert ret1 == ret1b + req2 = funcargs.FuncargRequest(item2) + ret2 = req2.cached_setup(setup) + assert ret2 == ret1 + + def test_request_cachedsetup_extrakey(self, testdir): + item1 = testdir.getitem("def test_func(): pass") + req1 = funcargs.FuncargRequest(item1) + l = ["hello", "world"] + def setup(): + return l.pop() + ret1 = req1.cached_setup(setup, extrakey=1) + ret2 = req1.cached_setup(setup, extrakey=2) + assert ret2 == "hello" + assert ret1 == "world" + ret1b = req1.cached_setup(setup, extrakey=1) + ret2b = req1.cached_setup(setup, extrakey=2) + assert ret1 == ret1b + assert ret2 == ret2b + + def test_request_cachedsetup_cache_deletion(self, testdir): + item1 = testdir.getitem("def test_func(): pass") + req1 = funcargs.FuncargRequest(item1) + l = [] + def setup(): + l.append("setup") + def teardown(val): + l.append("teardown") + ret1 = req1.cached_setup(setup, teardown, scope="function") + assert l == ['setup'] + # artificial call of finalizer + req1.config._setupstate._callfinalizers(item1) + assert l == ["setup", "teardown"] + ret2 = req1.cached_setup(setup, teardown, scope="function") + assert l == ["setup", "teardown", "setup"] + req1.config._setupstate._callfinalizers(item1) + assert l == ["setup", "teardown", "setup", "teardown"] + + def test_request_cached_setup_two_args(self, testdir): + testdir.makepyfile(""" + def pytest_funcarg__arg1(request): + return request.cached_setup(lambda: 42) + def pytest_funcarg__arg2(request): + return request.cached_setup(lambda: 17) + def test_two_different_setups(arg1, arg2): + assert arg1 != arg2 + """) + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "*1 passed*" + ]) + + def test_request_cached_setup_getfuncargvalue(self, testdir): + testdir.makepyfile(""" + def pytest_funcarg__arg1(request): + arg1 = request.getfuncargvalue("arg2") + return request.cached_setup(lambda: arg1 + 1) + def pytest_funcarg__arg2(request): + return request.cached_setup(lambda: 10) + def test_two_funcarg(arg1): + assert arg1 == 11 + """) + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "*1 passed*" + ]) + + def test_request_cached_setup_functional(self, testdir): + testdir.makepyfile(test_0=""" + l = [] + def pytest_funcarg__something(request): + val = request.cached_setup(setup, teardown) + return val + def setup(mycache=[1]): + l.append(mycache.pop()) + return l + def teardown(something): + l.remove(something[0]) + l.append(2) + def test_list_once(something): + assert something == [1] + def test_list_twice(something): + assert something == [1] + """) + testdir.makepyfile(test_1=""" + import test_0 # should have run already + def test_check_test0_has_teardown_correct(): + assert test_0.l == [2] + """) + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "*3 passed*" + ]) + +class TestMetafunc: + def test_no_funcargs(self, testdir): + def function(): pass + metafunc = funcargs.Metafunc(function) + assert not metafunc.funcargnames + + def test_function_basic(self): + def func(arg1, arg2="qwe"): pass + metafunc = funcargs.Metafunc(func) + assert len(metafunc.funcargnames) == 1 + assert 'arg1' in metafunc.funcargnames + assert metafunc.function is func + assert metafunc.cls 
is None + + def test_addcall_no_args(self): + def func(arg1): pass + metafunc = funcargs.Metafunc(func) + metafunc.addcall() + assert len(metafunc._calls) == 1 + call = metafunc._calls[0] + assert call.id == "0" + assert not hasattr(call, 'param') + + def test_addcall_id(self): + def func(arg1): pass + metafunc = funcargs.Metafunc(func) + py.test.raises(ValueError, "metafunc.addcall(id=None)") + + metafunc.addcall(id=1) + py.test.raises(ValueError, "metafunc.addcall(id=1)") + py.test.raises(ValueError, "metafunc.addcall(id='1')") + metafunc.addcall(id=2) + assert len(metafunc._calls) == 2 + assert metafunc._calls[0].id == "1" + assert metafunc._calls[1].id == "2" + + def test_addcall_param(self): + def func(arg1): pass + metafunc = funcargs.Metafunc(func) + class obj: pass + metafunc.addcall(param=obj) + metafunc.addcall(param=obj) + metafunc.addcall(param=1) + assert len(metafunc._calls) == 3 + assert metafunc._calls[0].param == obj + assert metafunc._calls[1].param == obj + assert metafunc._calls[2].param == 1 + + def test_addcall_funcargs(self): + def func(arg1): pass + metafunc = funcargs.Metafunc(func) + class obj: pass + metafunc.addcall(funcargs={"x": 2}) + metafunc.addcall(funcargs={"x": 3}) + assert len(metafunc._calls) == 2 + assert metafunc._calls[0].funcargs == {'x': 2} + assert metafunc._calls[1].funcargs == {'x': 3} + assert not hasattr(metafunc._calls[1], 'param') + +class TestGenfuncFunctional: + def test_attributes(self, testdir): + p = testdir.makepyfile(""" + # assumes that generate/provide runs in the same process + import py + def pytest_generate_tests(metafunc): + metafunc.addcall(param=metafunc) + + def pytest_funcarg__metafunc(request): + assert request._pyfuncitem._genid == "0" + return request.param + + def test_function(metafunc): + assert metafunc.config == py.test.config + assert metafunc.module.__name__ == __name__ + assert metafunc.function == test_function + assert metafunc.cls is None + + class TestClass: + def test_method(self, metafunc): + assert metafunc.config == py.test.config + assert metafunc.module.__name__ == __name__ + if py.std.sys.version_info > (3, 0): + unbound = TestClass.test_method + else: + unbound = TestClass.test_method.im_func + # XXX actually have an unbound test function here? 
+ assert metafunc.function == unbound + assert metafunc.cls == TestClass + """) + result = testdir.runpytest(p, "-v") + result.stdout.fnmatch_lines([ + "*2 passed in*", + ]) + + def test_addcall_with_two_funcargs_generators(self, testdir): + testdir.makeconftest(""" + def pytest_generate_tests(metafunc): + assert "arg1" in metafunc.funcargnames + metafunc.addcall(funcargs=dict(arg1=1, arg2=2)) + """) + p = testdir.makepyfile(""" + def pytest_generate_tests(metafunc): + metafunc.addcall(funcargs=dict(arg1=1, arg2=1)) + + class TestClass: + def test_myfunc(self, arg1, arg2): + assert arg1 == arg2 + """) + result = testdir.runpytest("-v", p) + assert result.stdout.fnmatch_lines([ + "*test_myfunc*0*PASS*", + "*test_myfunc*1*FAIL*", + "*1 failed, 1 passed*" + ]) + + def test_two_functions(self, testdir): + p = testdir.makepyfile(""" + def pytest_generate_tests(metafunc): + metafunc.addcall(param=10) + metafunc.addcall(param=20) + + def pytest_funcarg__arg1(request): + return request.param + + def test_func1(arg1): + assert arg1 == 10 + def test_func2(arg1): + assert arg1 in (10, 20) + """) + result = testdir.runpytest("-v", p) + assert result.stdout.fnmatch_lines([ + "*test_func1*0*PASS*", + "*test_func1*1*FAIL*", + "*test_func2*PASS*", + "*1 failed, 3 passed*" + ]) + + def test_generate_plugin_and_module(self, testdir): + testdir.makeconftest(""" + def pytest_generate_tests(metafunc): + assert "arg1" in metafunc.funcargnames + metafunc.addcall(id="world", param=(2,100)) + """) + p = testdir.makepyfile(""" + def pytest_generate_tests(metafunc): + metafunc.addcall(param=(1,1), id="hello") + + def pytest_funcarg__arg1(request): + return request.param[0] + def pytest_funcarg__arg2(request): + return request.param[1] + + class TestClass: + def test_myfunc(self, arg1, arg2): + assert arg1 == arg2 + """) + result = testdir.runpytest("-v", p) + assert result.stdout.fnmatch_lines([ + "*test_myfunc*hello*PASS*", + "*test_myfunc*world*FAIL*", + "*1 failed, 1 passed*" + ]) + + def test_generate_tests_in_class(self, testdir): + p = testdir.makepyfile(""" + class TestClass: + def pytest_generate_tests(self, metafunc): + metafunc.addcall(funcargs={'hello': 'world'}, id="hello") + + def test_myfunc(self, hello): + assert hello == "world" + """) + result = testdir.runpytest("-v", p) + assert result.stdout.fnmatch_lines([ + "*test_myfunc*hello*PASS*", + "*1 passed*" + ]) + + +def test_conftest_funcargs_only_available_in_subdir(testdir): + sub1 = testdir.mkpydir("sub1") + sub2 = testdir.mkpydir("sub2") + sub1.join("conftest.py").write(py.code.Source(""" + import py + def pytest_funcarg__arg1(request): + py.test.raises(Exception, "request.getfuncargvalue('arg2')") + """)) + sub2.join("conftest.py").write(py.code.Source(""" + import py + def pytest_funcarg__arg2(request): + py.test.raises(Exception, "request.getfuncargvalue('arg1')") + """)) + + sub1.join("test_in_sub1.py").write("def test_1(arg1): pass") + sub2.join("test_in_sub2.py").write("def test_2(arg2): pass") + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "*2 passed*" + ]) + +def test_funcarg_non_pycollectobj(testdir): # rough jstests usage + testdir.makeconftest(""" + import py + def pytest_pycollect_makeitem(collector, name, obj): + if name == "MyClass": + return MyCollector(name, parent=collector) + class MyCollector(py.test.collect.Collector): + def reportinfo(self): + return self.fspath, 3, "xyz" + """) + modcol = testdir.getmodulecol(""" + def pytest_funcarg__arg1(request): + return 42 + class MyClass: + pass + """) + clscol = 
modcol.collect()[0] + clscol.obj = lambda arg1: None + clscol.funcargs = {} + funcargs.fillfuncargs(clscol) + assert clscol.funcargs['arg1'] == 42 + --- a/testing/pytest/test_traceback.py +++ /dev/null @@ -1,28 +0,0 @@ -import py - -class TestTracebackCutting: - def test_skip_simple(self): - from py.impl.test.outcome import Skipped - excinfo = py.test.raises(Skipped, 'py.test.skip("xxx")') - assert excinfo.traceback[-1].frame.code.name == "skip" - assert excinfo.traceback[-1].ishidden() - - def test_traceback_argsetup(self, testdir): - testdir.makeconftest(""" - def pytest_funcarg__hello(request): - raise ValueError("xyz") - """) - p = testdir.makepyfile("def test(hello): pass") - result = testdir.runpytest(p) - assert result.ret != 0 - out = result.stdout.str() - assert out.find("xyz") != -1 - assert out.find("conftest.py:2: ValueError") != -1 - numentries = out.count("_ _ _") # separator for traceback entries - assert numentries == 0 - - result = testdir.runpytest("--fulltrace", p) - out = result.stdout.str() - assert out.find("conftest.py:2: ValueError") != -1 - numentries = out.count("_ _ _ _") # separator for traceback entries - assert numentries >3 --- /dev/null +++ b/testing/test_parseopt.py @@ -0,0 +1,119 @@ +import py +from py.impl.test import parseopt + +class TestParser: + def test_init(self, capsys): + parser = parseopt.Parser(usage="xyz") + py.test.raises(SystemExit, 'parser.parse(["-h"])') + out, err = capsys.readouterr() + assert out.find("xyz") != -1 + + def test_group_add_and_get(self): + parser = parseopt.Parser() + group = parser.addgroup("hello", description="desc") + assert group.name == "hello" + assert group.description == "desc" + + def test_addgroup_deprecation(self, recwarn): + parser = parseopt.Parser() + group = parser.addgroup("hello", description="desc") + assert recwarn.pop() + group2 = parser.getgroup("hello") + assert group == group2 + + def test_getgroup_simple(self): + parser = parseopt.Parser() + group = parser.getgroup("hello", description="desc") + assert group.name == "hello" + assert group.description == "desc" + group2 = parser.getgroup("hello") + assert group2 is group + + def test_group_ordering(self): + parser = parseopt.Parser() + group0 = parser.getgroup("1") + group1 = parser.getgroup("2") + group1 = parser.getgroup("3", after="1") + groups = parser._groups + groups_names = [x.name for x in groups] + assert groups_names == list("132") + + def test_group_addoption(self): + group = parseopt.OptionGroup("hello") + group.addoption("--option1", action="store_true") + assert len(group.options) == 1 + assert isinstance(group.options[0], parseopt.optparse.Option) + + def test_group_shortopt_lowercase(self): + parser = parseopt.Parser() + group = parser.addgroup("hello") + py.test.raises(ValueError, """ + group.addoption("-x", action="store_true") + """) + assert len(group.options) == 0 + group._addoption("-x", action="store_true") + assert len(group.options) == 1 + + def test_parser_addoption(self): + parser = parseopt.Parser() + group = parser.getgroup("custom options") + assert len(group.options) == 0 + group.addoption("--option1", action="store_true") + assert len(group.options) == 1 + + def test_parse(self): + parser = parseopt.Parser() + parser.addoption("--hello", dest="hello", action="store") + option, args = parser.parse(['--hello', 'world']) + assert option.hello == "world" + assert not args + + def test_parse(self): + parser = parseopt.Parser() + option, args = parser.parse([py.path.local()]) + assert args[0] == py.path.local() + + def 
test_parse_will_set_default(self): + parser = parseopt.Parser() + parser.addoption("--hello", dest="hello", default="x", action="store") + option, args = parser.parse([]) + assert option.hello == "x" + del option.hello + args = parser.parse_setoption([], option) + assert option.hello == "x" + + def test_parse_setoption(self): + parser = parseopt.Parser() + parser.addoption("--hello", dest="hello", action="store") + parser.addoption("--world", dest="world", default=42) + class A: pass + option = A() + args = parser.parse_setoption(['--hello', 'world'], option) + assert option.hello == "world" + assert option.world == 42 + assert not args + + def test_parse_defaultgetter(self): + def defaultget(option): + if option.type == "int": + option.default = 42 + elif option.type == "string": + option.default = "world" + parser = parseopt.Parser(processopt=defaultget) + parser.addoption("--this", dest="this", type="int", action="store") + parser.addoption("--hello", dest="hello", type="string", action="store") + parser.addoption("--no", dest="no", action="store_true") + option, args = parser.parse([]) + assert option.hello == "world" + assert option.this == 42 + + at py.test.mark.skipif("sys.version_info < (2,5)") +def test_addoption_parser_epilog(testdir): + testdir.makeconftest(""" + def pytest_addoption(parser): + parser.hints.append("hello world") + """) + result = testdir.runpytest('--help') + #assert result.ret != 0 + assert result.stdout.fnmatch_lines(["*hint: hello world*"]) + --- /dev/null +++ b/testing/test_outcome.py @@ -0,0 +1,71 @@ + +import py +import sys + +class TestRaises: + def test_raises(self): + py.test.raises(ValueError, "int('qwe')") + + def test_raises_exec(self): + py.test.raises(ValueError, "a,x = []") + + def test_raises_syntax_error(self): + py.test.raises(SyntaxError, "qwe qwe qwe") + + def test_raises_function(self): + py.test.raises(ValueError, int, 'hello') + +def test_pytest_exit(): + try: + py.test.exit("hello") + except: + excinfo = py.code.ExceptionInfo() + assert excinfo.errisinstance(KeyboardInterrupt) + +def test_exception_printing_skip(): + try: + py.test.skip("hello") + except Exception: + excinfo = py.code.ExceptionInfo() + s = excinfo.exconly(tryshort=True) + assert s.startswith("Skipped") + +def test_importorskip(): + from py.impl.test.outcome import Skipped, importorskip + assert importorskip == py.test.importorskip + try: + sys = importorskip("sys") + assert sys == py.std.sys + #path = py.test.importorskip("os.path") + #assert path == py.std.os.path + py.test.raises(Skipped, "py.test.importorskip('alskdj')") + py.test.raises(SyntaxError, "py.test.importorskip('x y z')") + py.test.raises(SyntaxError, "py.test.importorskip('x=y')") + path = importorskip("py", minversion=".".join(py.__version__)) + mod = py.std.types.ModuleType("hello123") + mod.__version__ = "1.3" + py.test.raises(Skipped, """ + py.test.importorskip("hello123", minversion="5.0") + """) + except Skipped: + print(py.code.ExceptionInfo()) + py.test.fail("spurious skip") + +def test_importorskip_imports_last_module_part(): + import os + ospath = py.test.importorskip("os.path") + assert os.path == ospath + + +def test_pytest_cmdline_main(testdir): + p = testdir.makepyfile(""" + import sys + sys.path.insert(0, %r) + import py + def test_hello(): + assert 1 + if __name__ == '__main__': + py.test.cmdline.main([__file__]) + """ % (str(py._pydir.dirpath()))) + import subprocess + subprocess.check_call([sys.executable, str(p)]) --- a/testing/test_builtin.py +++ /dev/null @@ -1,148 +0,0 @@ -import sys 
-import py -from py.builtin import set, frozenset, reversed, sorted - -def test_enumerate(): - l = [0,1,2] - for i,x in enumerate(l): - assert i == x - -def test_BaseException(): - assert issubclass(IndexError, py.builtin.BaseException) - assert issubclass(Exception, py.builtin.BaseException) - assert issubclass(KeyboardInterrupt, py.builtin.BaseException) - - class MyRandomClass(object): - pass - assert not issubclass(MyRandomClass, py.builtin.BaseException) - - assert py.builtin.BaseException.__module__ in ('exceptions', 'builtins') - assert Exception.__name__ == 'Exception' - - -def test_GeneratorExit(): - assert py.builtin.GeneratorExit.__module__ in ('exceptions', 'builtins') - assert issubclass(py.builtin.GeneratorExit, py.builtin.BaseException) - -def test_reversed(): - reversed = py.builtin.reversed - r = reversed("hello") - assert iter(r) is r - s = "".join(list(r)) - assert s == "olleh" - assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o'] - py.test.raises(TypeError, reversed, reversed("hello")) - -def test_simple(): - s = set([1, 2, 3, 4]) - assert s == set([3, 4, 2, 1]) - s1 = s.union(set([5, 6])) - assert 5 in s1 - assert 1 in s1 - -def test_frozenset(): - s = set([frozenset([0, 1]), frozenset([1, 0])]) - assert len(s) == 1 - -def test_sorted(): - if sorted == py.builtin.sorted: - return # don't test a real builtin - for s in [py.builtin.sorted]: - def test(): - assert s([3, 2, 1]) == [1, 2, 3] - assert s([1, 2, 3], reverse=True) == [3, 2, 1] - l = s([1, 2, 3, 4, 5, 6], key=lambda x: x % 2) - assert l == [2, 4, 6, 1, 3, 5] - l = s([1, 2, 3, 4], cmp=lambda x, y: -cmp(x, y)) - assert l == [4, 3, 2, 1] - l = s([1, 2, 3, 4], cmp=lambda x, y: -cmp(x, y), - key=lambda x: x % 2) - assert l == [1, 3, 2, 4] - - def compare(x, y): - assert type(x) == str - assert type(y) == str - return cmp(x, y) - data = 'The quick Brown fox Jumped over The lazy Dog'.split() - s(data, cmp=compare, key=str.lower) - yield test - - -def test_print_simple(): - from py.builtin import print_ - py.test.raises(TypeError, "print_(hello=3)") - f = py.io.TextIO() - print_("hello", "world", file=f) - s = f.getvalue() - assert s == "hello world\n" - - f = py.io.TextIO() - print_("hello", end="", file=f) - s = f.getvalue() - assert s == "hello" - - f = py.io.TextIO() - print_("xyz", "abc", sep="", end="", file=f) - s = f.getvalue() - assert s == "xyzabc" - - class X: - def __repr__(self): return "rep" - f = py.io.TextIO() - print_(X(), file=f) - assert f.getvalue() == "rep\n" - -def test_execfile(tmpdir): - test_file = tmpdir.join("test.py") - test_file.write("x = y\ndef f(): pass") - ns = {"y" : 42} - py.builtin.execfile(str(test_file), ns) - assert ns["x"] == 42 - assert py.code.getrawcode(ns["f"]).co_filename == str(test_file) - class A: - y = 3 - x = 4 - py.builtin.execfile(str(test_file)) - assert A.x == 3 - -def test_getfuncdict(): - def f(): - pass - f.x = 4 - assert py.builtin._getfuncdict(f)["x"] == 4 - assert py.builtin._getfuncdict(2) is None - -def test_callable(): - class A: pass - assert py.builtin.callable(test_callable) - assert py.builtin.callable(A) - assert py.builtin.callable(list) - assert py.builtin.callable(id) - assert not py.builtin.callable(4) - assert not py.builtin.callable("hi") - -def test_totext(): - py.builtin._totext("hello", "UTF-8") - -def test_reraise(): - from py.builtin import _reraise - try: - raise Exception() - except Exception: - cls, val, tb = sys.exc_info() - excinfo = py.test.raises(Exception, "_reraise(cls, val, tb)") - -def test_exec(): - l = [] - 
py.builtin.exec_("l.append(1)") - assert l == [1] - d = {} - py.builtin.exec_("x=4", d) - assert d['x'] == 4 - -def test_tryimport(): - py.test.raises(ImportError, py.builtin._tryimport, 'xqwe123') - x = py.builtin._tryimport('asldkajsdl', 'py') - assert x == py - x = py.builtin._tryimport('asldkajsdl', 'py.path') - assert x == py.path --- /dev/null +++ b/testing/test_pluginmanager.py @@ -0,0 +1,438 @@ +import py, os +from py.impl.test.pluginmanager import PluginManager, canonical_importname +from py.impl.test.pluginmanager import Registry, MultiCall, HookRelay, varnames + + +class TestBootstrapping: + def test_consider_env_fails_to_import(self, monkeypatch): + pluginmanager = PluginManager() + monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",") + py.test.raises(ImportError, "pluginmanager.consider_env()") + + def test_preparse_args(self): + pluginmanager = PluginManager() + py.test.raises(ImportError, """ + pluginmanager.consider_preparse(["xyz", "-p", "hello123"]) + """) + + def test_plugin_skip(self, testdir, monkeypatch): + p = testdir.makepyfile(pytest_skipping1=""" + import py + py.test.skip("hello") + """) + p.copy(p.dirpath("pytest_skipping2.py")) + monkeypatch.setenv("PYTEST_PLUGINS", "skipping2") + result = testdir.runpytest("-p", "skipping1", "--traceconfig") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*hint*skipping2*hello*", + "*hint*skipping1*hello*", + ]) + + def test_consider_env_plugin_instantiation(self, testdir, monkeypatch): + pluginmanager = PluginManager() + testdir.syspathinsert() + testdir.makepyfile(pytest_xy123="#") + monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123') + l1 = len(pluginmanager.getplugins()) + pluginmanager.consider_env() + l2 = len(pluginmanager.getplugins()) + assert l2 == l1 + 1 + assert pluginmanager.getplugin('pytest_xy123') + pluginmanager.consider_env() + l3 = len(pluginmanager.getplugins()) + assert l2 == l3 + + def test_consider_setuptools_instantiation(self, monkeypatch): + pkg_resources = py.test.importorskip("pkg_resources") + def my_iter(name): + assert name == "pytest11" + class EntryPoint: + name = "mytestplugin" + def load(self): + class PseudoPlugin: + x = 42 + return PseudoPlugin() + return iter([EntryPoint()]) + + monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) + pluginmanager = PluginManager() + pluginmanager.consider_setuptools_entrypoints() + plugin = pluginmanager.getplugin("mytestplugin") + assert plugin.x == 42 + plugin2 = pluginmanager.getplugin("pytest_mytestplugin") + assert plugin2 == plugin + + def test_consider_setuptools_not_installed(self, monkeypatch): + monkeypatch.setitem(py.std.sys.modules, 'pkg_resources', + py.std.types.ModuleType("pkg_resources")) + pluginmanager = PluginManager() + pluginmanager.consider_setuptools_entrypoints() + # ok, we did not explode + + def test_pluginmanager_ENV_startup(self, testdir, monkeypatch): + x500 = testdir.makepyfile(pytest_x500="#") + p = testdir.makepyfile(""" + import py + def test_hello(): + plugin = py.test.config.pluginmanager.getplugin('x500') + assert plugin is not None + """) + monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",") + result = testdir.runpytest(p) + assert result.ret == 0 + extra = result.stdout.fnmatch_lines(["*1 passed in*"]) + + def test_import_plugin_importname(self, testdir): + pluginmanager = PluginManager() + py.test.raises(ImportError, 'pluginmanager.import_plugin("x.y")') + py.test.raises(ImportError, 'pluginmanager.import_plugin("pytest_x.y")') + + reset = testdir.syspathinsert() 
+ pluginname = "pytest_hello" + testdir.makepyfile(**{pluginname: ""}) + pluginmanager.import_plugin("hello") + len1 = len(pluginmanager.getplugins()) + pluginmanager.import_plugin("pytest_hello") + len2 = len(pluginmanager.getplugins()) + assert len1 == len2 + plugin1 = pluginmanager.getplugin("pytest_hello") + assert plugin1.__name__.endswith('pytest_hello') + plugin2 = pluginmanager.getplugin("hello") + assert plugin2 is plugin1 + + def test_consider_module(self, testdir): + pluginmanager = PluginManager() + testdir.syspathinsert() + testdir.makepyfile(pytest_plug1="#") + testdir.makepyfile(pytest_plug2="#") + mod = py.std.types.ModuleType("temp") + mod.pytest_plugins = ["pytest_plug1", "pytest_plug2"] + pluginmanager.consider_module(mod) + assert pluginmanager.getplugin("plug1").__name__ == "pytest_plug1" + assert pluginmanager.getplugin("plug2").__name__ == "pytest_plug2" + + def test_consider_module_import_module(self, testdir): + mod = py.std.types.ModuleType("x") + mod.pytest_plugins = "pytest_a" + aplugin = testdir.makepyfile(pytest_a="#") + pluginmanager = PluginManager() + reprec = testdir.getreportrecorder(pluginmanager) + #syspath.prepend(aplugin.dirpath()) + py.std.sys.path.insert(0, str(aplugin.dirpath())) + pluginmanager.consider_module(mod) + call = reprec.getcall(pluginmanager.hook.pytest_plugin_registered.name) + assert call.plugin.__name__ == "pytest_a" + + # check that it is not registered twice + pluginmanager.consider_module(mod) + l = reprec.getcalls("pytest_plugin_registered") + assert len(l) == 1 + + def test_consider_conftest_deprecated(self, testdir): + pp = PluginManager() + mod = testdir.makepyfile("class ConftestPlugin: pass").pyimport() + call = py.test.raises(ValueError, pp.consider_conftest, mod) + + def test_config_sets_conftesthandle_onimport(self, testdir): + config = testdir.parseconfig([]) + assert config._conftest._onimport == config._onimportconftest + + def test_consider_conftest_deps(self, testdir): + mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport() + pp = PluginManager() + py.test.raises(ImportError, "pp.consider_conftest(mod)") + + def test_registry(self): + pp = PluginManager() + class A: pass + a1, a2 = A(), A() + pp.register(a1) + assert pp.isregistered(a1) + pp.register(a2, "hello") + assert pp.isregistered(a2) + l = pp.getplugins() + assert a1 in l + assert a2 in l + assert pp.getplugin('hello') == a2 + pp.unregister(a1) + assert not pp.isregistered(a1) + pp.unregister(a2) + assert not pp.isregistered(a2) + + def test_register_imported_modules(self): + pp = PluginManager() + mod = py.std.types.ModuleType("x.y.pytest_hello") + pp.register(mod) + assert pp.isregistered(mod) + l = pp.getplugins() + assert mod in l + py.test.raises(AssertionError, "pp.register(mod)") + mod2 = py.std.types.ModuleType("pytest_hello") + #pp.register(mod2) # double registry + py.test.raises(AssertionError, "pp.register(mod)") + #assert not pp.isregistered(mod2) + assert pp.getplugins() == l + + def test_canonical_import(self, monkeypatch): + mod = py.std.types.ModuleType("pytest_xyz") + monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod) + pp = PluginManager() + pp.import_plugin('xyz') + assert pp.getplugin('xyz') == mod + assert pp.getplugin('pytest_xyz') == mod + assert pp.isregistered(mod) + + def test_register_mismatch_method(self): + pp = PluginManager() + class hello: + def pytest_gurgel(self): + pass + py.test.raises(Exception, "pp.register(hello())") + + def test_register_mismatch_arg(self): + pp = PluginManager() + class hello: + def 
pytest_configure(self, asd): + pass + excinfo = py.test.raises(Exception, "pp.register(hello())") + + def test_canonical_importname(self): + for name in 'xyz', 'pytest_xyz', 'pytest_Xyz', 'Xyz': + impname = canonical_importname(name) + +class TestPytestPluginInteractions: + def test_do_option_conftestplugin(self, testdir): + from py.impl.test.config import Config + p = testdir.makepyfile(""" + def pytest_addoption(parser): + parser.addoption('--test123', action="store_true") + """) + config = Config() + config._conftest.importconftest(p) + print(config.pluginmanager.getplugins()) + config.parse([]) + assert not config.option.test123 + + def test_do_ext_namespace(self, testdir): + testdir.makeconftest(""" + def pytest_namespace(): + return {'hello': 'world'} + """) + p = testdir.makepyfile(""" + from py.test import hello + import py + def test_hello(): + assert hello == "world" + assert 'hello' in py.test.__all__ + """) + result = testdir.runpytest(p) + result.stdout.fnmatch_lines([ + "*1 passed*" + ]) + + def test_do_option_postinitialize(self, testdir): + from py.impl.test.config import Config + config = Config() + config.parse([]) + config.pluginmanager.do_configure(config=config) + assert not hasattr(config.option, 'test123') + p = testdir.makepyfile(""" + def pytest_addoption(parser): + parser.addoption('--test123', action="store_true", + default=True) + """) + config._conftest.importconftest(p) + assert config.option.test123 + + def test_configure(self, testdir): + config = testdir.parseconfig() + l = [] + class A: + def pytest_configure(self, config): + l.append(self) + + config.pluginmanager.register(A()) + assert len(l) == 0 + config.pluginmanager.do_configure(config=config) + assert len(l) == 1 + config.pluginmanager.register(A()) # this should lead to a configured() plugin + assert len(l) == 2 + assert l[0] != l[1] + + config.pluginmanager.do_unconfigure(config=config) + config.pluginmanager.register(A()) + assert len(l) == 2 + + # lower level API + + def test_listattr(self): + pluginmanager = PluginManager() + class My2: + x = 42 + pluginmanager.register(My2()) + assert not pluginmanager.listattr("hello") + assert pluginmanager.listattr("x") == [42] + +def test_namespace_has_default_and_env_plugins(testdir): + p = testdir.makepyfile(""" + import py + py.test.mark + """) + result = testdir.runpython(p) + assert result.ret == 0 + +def test_varnames(): + def f(x): + pass + class A: + def f(self, y): + pass + assert varnames(f) == ("x",) + assert varnames(A().f) == ('y',) + +class TestMultiCall: + def test_uses_copy_of_methods(self): + l = [lambda: 42] + mc = MultiCall(l, {}) + repr(mc) + l[:] = [] + res = mc.execute() + return res == 42 + + def test_call_passing(self): + class P1: + def m(self, __multicall__, x): + assert len(__multicall__.results) == 1 + assert not __multicall__.methods + return 17 + + class P2: + def m(self, __multicall__, x): + assert __multicall__.results == [] + assert __multicall__.methods + return 23 + + p1 = P1() + p2 = P2() + multicall = MultiCall([p1.m, p2.m], {'x': 23}) + assert "23" in repr(multicall) + reslist = multicall.execute() + assert len(reslist) == 2 + # ensure reversed order + assert reslist == [23, 17] + + def test_keyword_args(self): + def f(x): + return x + 1 + class A: + def f(self, x, y): + return x + y + multicall = MultiCall([f, A().f], dict(x=23, y=24)) + assert "'x': 23" in repr(multicall) + assert "'y': 24" in repr(multicall) + reslist = multicall.execute() + assert reslist == [24+23, 24] + assert "2 results" in repr(multicall) + + 
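
The TestMultiCall cases above pin down the hook-dispatch behaviour: registered methods run in reverse registration order, each method only receives the keyword arguments it actually names, None results are dropped, and firstresult=True returns the first non-None value. A minimal self-contained sketch of that behaviour follows; it is not py's own implementation, the helper names are made up, and the __multicall__ chaining seen in test_call_passing is left out for brevity:

    def _argnames(func):
        # positional argument names of a plain function
        code = func.__code__
        return code.co_varnames[:code.co_argcount]

    def multicall(methods, kwargs, firstresult=False):
        results = []
        for method in reversed(methods):   # later registrations are called first
            accepted = dict((name, kwargs[name])
                            for name in _argnames(method) if name in kwargs)
            res = method(**accepted)
            if res is not None:
                if firstresult:
                    return res
                results.append(res)
        return None if firstresult else results

    # mirrors test_keyword_args and test_call_none_is_no_result above
    def f(x):
        return x + 1
    def g(x, y):
        return x + y
    assert multicall([f, g], dict(x=23, y=24)) == [24 + 23, 24]
    assert multicall([lambda: 1, lambda: None], {}, firstresult=True) == 1
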
def test_keywords_call_error(self): + multicall = MultiCall([lambda x: x], {}) + py.test.raises(TypeError, "multicall.execute()") + + def test_call_subexecute(self): + def m(__multicall__): + subresult = __multicall__.execute() + return subresult + 1 + + def n(): + return 1 + + call = MultiCall([n, m], {}, firstresult=True) + res = call.execute() + assert res == 2 + + def test_call_none_is_no_result(self): + def m1(): + return 1 + def m2(): + return None + res = MultiCall([m1, m2], {}, firstresult=True).execute() + assert res == 1 + res = MultiCall([m1, m2], {}).execute() + assert res == [1] + +class TestRegistry: + + def test_register(self): + registry = Registry() + class MyPlugin: + pass + my = MyPlugin() + registry.register(my) + assert list(registry) == [my] + my2 = MyPlugin() + registry.register(my2) + assert list(registry) == [my, my2] + + assert registry.isregistered(my) + assert registry.isregistered(my2) + registry.unregister(my) + assert not registry.isregistered(my) + assert list(registry) == [my2] + + def test_listattr(self): + plugins = Registry() + class api1: + x = 41 + class api2: + x = 42 + class api3: + x = 43 + plugins.register(api1()) + plugins.register(api2()) + plugins.register(api3()) + l = list(plugins.listattr('x')) + assert l == [41, 42, 43] + l = list(plugins.listattr('x', reverse=True)) + assert l == [43, 42, 41] + +class TestHookRelay: + def test_happypath(self): + registry = Registry() + class Api: + def hello(self, arg): + pass + + mcm = HookRelay(hookspecs=Api, registry=registry) + assert hasattr(mcm, 'hello') + assert repr(mcm.hello).find("hello") != -1 + class Plugin: + def hello(self, arg): + return arg + 1 + registry.register(Plugin()) + l = mcm.hello(arg=3) + assert l == [4] + assert not hasattr(mcm, 'world') + + def test_only_kwargs(self): + registry = Registry() + class Api: + def hello(self, arg): + pass + mcm = HookRelay(hookspecs=Api, registry=registry) + py.test.raises(TypeError, "mcm.hello(3)") + + def test_firstresult_definition(self): + registry = Registry() + class Api: + def hello(self, arg): pass + hello.firstresult = True + + mcm = HookRelay(hookspecs=Api, registry=registry) + class Plugin: + def hello(self, arg): + return arg + 1 + registry.register(Plugin()) + res = mcm.hello(arg=3) + assert res == 4 + --- a/testing/pytest/test_parseopt.py +++ /dev/null @@ -1,119 +0,0 @@ -import py -from py.impl.test import parseopt - -class TestParser: - def test_init(self, capsys): - parser = parseopt.Parser(usage="xyz") - py.test.raises(SystemExit, 'parser.parse(["-h"])') - out, err = capsys.readouterr() - assert out.find("xyz") != -1 - - def test_group_add_and_get(self): - parser = parseopt.Parser() - group = parser.addgroup("hello", description="desc") - assert group.name == "hello" - assert group.description == "desc" - - def test_addgroup_deprecation(self, recwarn): - parser = parseopt.Parser() - group = parser.addgroup("hello", description="desc") - assert recwarn.pop() - group2 = parser.getgroup("hello") - assert group == group2 - - def test_getgroup_simple(self): - parser = parseopt.Parser() - group = parser.getgroup("hello", description="desc") - assert group.name == "hello" - assert group.description == "desc" - group2 = parser.getgroup("hello") - assert group2 is group - - def test_group_ordering(self): - parser = parseopt.Parser() - group0 = parser.getgroup("1") - group1 = parser.getgroup("2") - group1 = parser.getgroup("3", after="1") - groups = parser._groups - groups_names = [x.name for x in groups] - assert groups_names == list("132") - 
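
The Parser/OptionGroup API exercised by test_parseopt.py (both the new copy added above and the old copy being removed here) follows optparse conventions: options are declared on named groups and parsed values land as attributes on a single option object. A short usage sketch under those assumptions, with illustrative option names only:

    from py.impl.test import parseopt

    parser = parseopt.Parser(usage="usage: py.test [options]")
    group = parser.getgroup("demo", description="demo options")  # created on first use
    group.addoption("--speed", dest="speed", type="int", default=3, action="store")
    option, args = parser.parse(["--speed", "5"])
    assert option.speed == 5
    assert not args
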
- def test_group_addoption(self): - group = parseopt.OptionGroup("hello") - group.addoption("--option1", action="store_true") - assert len(group.options) == 1 - assert isinstance(group.options[0], parseopt.optparse.Option) - - def test_group_shortopt_lowercase(self): - parser = parseopt.Parser() - group = parser.addgroup("hello") - py.test.raises(ValueError, """ - group.addoption("-x", action="store_true") - """) - assert len(group.options) == 0 - group._addoption("-x", action="store_true") - assert len(group.options) == 1 - - def test_parser_addoption(self): - parser = parseopt.Parser() - group = parser.getgroup("custom options") - assert len(group.options) == 0 - group.addoption("--option1", action="store_true") - assert len(group.options) == 1 - - def test_parse(self): - parser = parseopt.Parser() - parser.addoption("--hello", dest="hello", action="store") - option, args = parser.parse(['--hello', 'world']) - assert option.hello == "world" - assert not args - - def test_parse(self): - parser = parseopt.Parser() - option, args = parser.parse([py.path.local()]) - assert args[0] == py.path.local() - - def test_parse_will_set_default(self): - parser = parseopt.Parser() - parser.addoption("--hello", dest="hello", default="x", action="store") - option, args = parser.parse([]) - assert option.hello == "x" - del option.hello - args = parser.parse_setoption([], option) - assert option.hello == "x" - - def test_parse_setoption(self): - parser = parseopt.Parser() - parser.addoption("--hello", dest="hello", action="store") - parser.addoption("--world", dest="world", default=42) - class A: pass - option = A() - args = parser.parse_setoption(['--hello', 'world'], option) - assert option.hello == "world" - assert option.world == 42 - assert not args - - def test_parse_defaultgetter(self): - def defaultget(option): - if option.type == "int": - option.default = 42 - elif option.type == "string": - option.default = "world" - parser = parseopt.Parser(processopt=defaultget) - parser.addoption("--this", dest="this", type="int", action="store") - parser.addoption("--hello", dest="hello", type="string", action="store") - parser.addoption("--no", dest="no", action="store_true") - option, args = parser.parse([]) - assert option.hello == "world" - assert option.this == 42 - - at py.test.mark.skipif("sys.version_info < (2,5)") -def test_addoption_parser_epilog(testdir): - testdir.makeconftest(""" - def pytest_addoption(parser): - parser.hints.append("hello world") - """) - result = testdir.runpytest('--help') - #assert result.ret != 0 - assert result.stdout.fnmatch_lines(["*hint: hello world*"]) - --- a/testing/pytest/test_session.py +++ /dev/null @@ -1,200 +0,0 @@ -import py - -class SessionTests: - def test_initsession(self, testdir, tmpdir): - config = testdir.reparseconfig() - session = config.initsession() - assert session.config is config - - def test_basic_testitem_events(self, testdir): - tfile = testdir.makepyfile(""" - def test_one(): - pass - def test_one_one(): - assert 0 - def test_other(): - raise ValueError(23) - def test_two(someargs): - pass - """) - reprec = testdir.inline_run(tfile) - passed, skipped, failed = reprec.listoutcomes() - assert len(skipped) == 0 - assert len(passed) == 1 - assert len(failed) == 3 - assert failed[0].item.name == "test_one_one" - assert failed[1].item.name == "test_other" - assert failed[2].item.name == "test_two" - itemstarted = reprec.getcalls("pytest_itemstart") - assert len(itemstarted) == 4 - colstarted = reprec.getcalls("pytest_collectstart") - assert 
len(colstarted) == 1 - col = colstarted[0].collector - assert isinstance(col, py.test.collect.Module) - - def test_nested_import_error(self, testdir): - tfile = testdir.makepyfile(""" - import import_fails - def test_this(): - assert import_fails.a == 1 - """, import_fails=""" - import does_not_work - a = 1 - """) - reprec = testdir.inline_run(tfile) - l = reprec.getfailedcollections() - assert len(l) == 1 - out = l[0].longrepr.reprcrash.message - assert out.find('does_not_work') != -1 - - def test_raises_output(self, testdir): - reprec = testdir.inline_runsource(""" - import py - def test_raises_doesnt(): - py.test.raises(ValueError, int, "3") - """) - passed, skipped, failed = reprec.listoutcomes() - assert len(failed) == 1 - out = failed[0].longrepr.reprcrash.message - if not out.find("DID NOT RAISE") != -1: - print(out) - py.test.fail("incorrect raises() output") - - def test_generator_yields_None(self, testdir): - reprec = testdir.inline_runsource(""" - def test_1(): - yield None - """) - failures = reprec.getfailedcollections() - out = failures[0].longrepr.reprcrash.message - i = out.find('TypeError') - assert i != -1 - - def test_syntax_error_module(self, testdir): - reprec = testdir.inline_runsource("this is really not python") - l = reprec.getfailedcollections() - assert len(l) == 1 - out = l[0].longrepr.reprcrash.message - assert out.find(str('not python')) != -1 - - def test_exit_first_problem(self, testdir): - reprec = testdir.inline_runsource(""" - def test_one(): assert 0 - def test_two(): assert 0 - """, '--exitfirst') - passed, skipped, failed = reprec.countoutcomes() - assert failed == 1 - assert passed == skipped == 0 - - def test_broken_repr(self, testdir): - p = testdir.makepyfile(""" - import py - class BrokenRepr1: - foo=0 - def __repr__(self): - raise Exception("Ha Ha fooled you, I'm a broken repr().") - - class TestBrokenClass: - def test_explicit_bad_repr(self): - t = BrokenRepr1() - py.test.raises(Exception, 'repr(t)') - - def test_implicit_bad_repr1(self): - t = BrokenRepr1() - assert t.foo == 1 - - """) - reprec = testdir.inline_run(p) - passed, skipped, failed = reprec.listoutcomes() - assert len(failed) == 1 - out = failed[0].longrepr.reprcrash.message - assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 #' - - def test_skip_by_conftest_directory(self, testdir): - testdir.makepyfile(conftest=""" - import py - class Directory(py.test.collect.Directory): - def collect(self): - py.test.skip("intentional") - """, test_file=""" - def test_one(): pass - """) - reprec = testdir.inline_run(testdir.tmpdir) - reports = reprec.getreports("pytest_collectreport") - assert len(reports) == 1 - assert reports[0].skipped - -class TestNewSession(SessionTests): - - def test_order_of_execution(self, testdir): - reprec = testdir.inline_runsource(""" - l = [] - def test_1(): - l.append(1) - def test_2(): - l.append(2) - def test_3(): - assert l == [1,2] - class Testmygroup: - reslist = l - def test_1(self): - self.reslist.append(1) - def test_2(self): - self.reslist.append(2) - def test_3(self): - self.reslist.append(3) - def test_4(self): - assert self.reslist == [1,2,1,2,3] - """) - passed, skipped, failed = reprec.countoutcomes() - assert failed == skipped == 0 - assert passed == 7 - # also test listnames() here ... 
- - def test_collect_only_with_various_situations(self, testdir): - p = testdir.makepyfile( - test_one=""" - def test_one(): - raise ValueError() - - class TestX: - def test_method_one(self): - pass - - class TestY(TestX): - pass - """, - test_two=""" - import py - py.test.skip('xxx') - """, - test_three="xxxdsadsadsadsa", - __init__="" - ) - reprec = testdir.inline_run('--collectonly', p.dirpath()) - - itemstarted = reprec.getcalls("pytest_itemstart") - assert len(itemstarted) == 3 - assert not reprec.getreports("pytest_runtest_logreport") - started = reprec.getcalls("pytest_collectstart") - finished = reprec.getreports("pytest_collectreport") - assert len(started) == len(finished) - assert len(started) == 8 - colfail = [x for x in finished if x.failed] - colskipped = [x for x in finished if x.skipped] - assert len(colfail) == 1 - assert len(colskipped) == 1 - - def test_minus_x_import_error(self, testdir): - testdir.makepyfile(__init__="") - testdir.makepyfile(test_one="xxxx", test_two="yyyy") - reprec = testdir.inline_run("-x", testdir.tmpdir) - finished = reprec.getreports("pytest_collectreport") - colfail = [x for x in finished if x.failed] - assert len(colfail) == 1 - -class TestNewSessionDSession(SessionTests): - def parseconfig(self, *args): - args = ('-n1',) + args - return SessionTests.parseconfig(self, *args) - --- /dev/null +++ b/testing/test_session.py @@ -0,0 +1,200 @@ +import py + +class SessionTests: + def test_initsession(self, testdir, tmpdir): + config = testdir.reparseconfig() + session = config.initsession() + assert session.config is config + + def test_basic_testitem_events(self, testdir): + tfile = testdir.makepyfile(""" + def test_one(): + pass + def test_one_one(): + assert 0 + def test_other(): + raise ValueError(23) + def test_two(someargs): + pass + """) + reprec = testdir.inline_run(tfile) + passed, skipped, failed = reprec.listoutcomes() + assert len(skipped) == 0 + assert len(passed) == 1 + assert len(failed) == 3 + assert failed[0].item.name == "test_one_one" + assert failed[1].item.name == "test_other" + assert failed[2].item.name == "test_two" + itemstarted = reprec.getcalls("pytest_itemstart") + assert len(itemstarted) == 4 + colstarted = reprec.getcalls("pytest_collectstart") + assert len(colstarted) == 1 + col = colstarted[0].collector + assert isinstance(col, py.test.collect.Module) + + def test_nested_import_error(self, testdir): + tfile = testdir.makepyfile(""" + import import_fails + def test_this(): + assert import_fails.a == 1 + """, import_fails=""" + import does_not_work + a = 1 + """) + reprec = testdir.inline_run(tfile) + l = reprec.getfailedcollections() + assert len(l) == 1 + out = l[0].longrepr.reprcrash.message + assert out.find('does_not_work') != -1 + + def test_raises_output(self, testdir): + reprec = testdir.inline_runsource(""" + import py + def test_raises_doesnt(): + py.test.raises(ValueError, int, "3") + """) + passed, skipped, failed = reprec.listoutcomes() + assert len(failed) == 1 + out = failed[0].longrepr.reprcrash.message + if not out.find("DID NOT RAISE") != -1: + print(out) + py.test.fail("incorrect raises() output") + + def test_generator_yields_None(self, testdir): + reprec = testdir.inline_runsource(""" + def test_1(): + yield None + """) + failures = reprec.getfailedcollections() + out = failures[0].longrepr.reprcrash.message + i = out.find('TypeError') + assert i != -1 + + def test_syntax_error_module(self, testdir): + reprec = testdir.inline_runsource("this is really not python") + l = reprec.getfailedcollections() + 
assert len(l) == 1 + out = l[0].longrepr.reprcrash.message + assert out.find(str('not python')) != -1 + + def test_exit_first_problem(self, testdir): + reprec = testdir.inline_runsource(""" + def test_one(): assert 0 + def test_two(): assert 0 + """, '--exitfirst') + passed, skipped, failed = reprec.countoutcomes() + assert failed == 1 + assert passed == skipped == 0 + + def test_broken_repr(self, testdir): + p = testdir.makepyfile(""" + import py + class BrokenRepr1: + foo=0 + def __repr__(self): + raise Exception("Ha Ha fooled you, I'm a broken repr().") + + class TestBrokenClass: + def test_explicit_bad_repr(self): + t = BrokenRepr1() + py.test.raises(Exception, 'repr(t)') + + def test_implicit_bad_repr1(self): + t = BrokenRepr1() + assert t.foo == 1 + + """) + reprec = testdir.inline_run(p) + passed, skipped, failed = reprec.listoutcomes() + assert len(failed) == 1 + out = failed[0].longrepr.reprcrash.message + assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 #' + + def test_skip_by_conftest_directory(self, testdir): + testdir.makepyfile(conftest=""" + import py + class Directory(py.test.collect.Directory): + def collect(self): + py.test.skip("intentional") + """, test_file=""" + def test_one(): pass + """) + reprec = testdir.inline_run(testdir.tmpdir) + reports = reprec.getreports("pytest_collectreport") + assert len(reports) == 1 + assert reports[0].skipped + +class TestNewSession(SessionTests): + + def test_order_of_execution(self, testdir): + reprec = testdir.inline_runsource(""" + l = [] + def test_1(): + l.append(1) + def test_2(): + l.append(2) + def test_3(): + assert l == [1,2] + class Testmygroup: + reslist = l + def test_1(self): + self.reslist.append(1) + def test_2(self): + self.reslist.append(2) + def test_3(self): + self.reslist.append(3) + def test_4(self): + assert self.reslist == [1,2,1,2,3] + """) + passed, skipped, failed = reprec.countoutcomes() + assert failed == skipped == 0 + assert passed == 7 + # also test listnames() here ... 
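
These session tests all rely on the same in-process pattern: the testdir funcarg (provided by the pytest_pytester plugin) writes a throwaway test module, runs it inside the current process, and returns a report recorder whose countoutcomes() gives the (passed, skipped, failed) counts. A minimal usage sketch with made-up test names:

    pytest_plugins = "pytest_pytester"

    def test_outcomes_are_counted(testdir):
        reprec = testdir.inline_runsource("""
            import py
            def test_ok():
                assert 1
            def test_skipped():
                py.test.skip("demo")
            def test_broken():
                assert 0
        """)
        passed, skipped, failed = reprec.countoutcomes()
        assert (passed, skipped, failed) == (1, 1, 1)
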
+ + def test_collect_only_with_various_situations(self, testdir): + p = testdir.makepyfile( + test_one=""" + def test_one(): + raise ValueError() + + class TestX: + def test_method_one(self): + pass + + class TestY(TestX): + pass + """, + test_two=""" + import py + py.test.skip('xxx') + """, + test_three="xxxdsadsadsadsa", + __init__="" + ) + reprec = testdir.inline_run('--collectonly', p.dirpath()) + + itemstarted = reprec.getcalls("pytest_itemstart") + assert len(itemstarted) == 3 + assert not reprec.getreports("pytest_runtest_logreport") + started = reprec.getcalls("pytest_collectstart") + finished = reprec.getreports("pytest_collectreport") + assert len(started) == len(finished) + assert len(started) == 8 + colfail = [x for x in finished if x.failed] + colskipped = [x for x in finished if x.skipped] + assert len(colfail) == 1 + assert len(colskipped) == 1 + + def test_minus_x_import_error(self, testdir): + testdir.makepyfile(__init__="") + testdir.makepyfile(test_one="xxxx", test_two="yyyy") + reprec = testdir.inline_run("-x", testdir.tmpdir) + finished = reprec.getreports("pytest_collectreport") + colfail = [x for x in finished if x.failed] + assert len(colfail) == 1 + +class TestNewSessionDSession(SessionTests): + def parseconfig(self, *args): + args = ('-n1',) + args + return SessionTests.parseconfig(self, *args) + --- a/testing/pytest/test_pycollect.py +++ /dev/null @@ -1,442 +0,0 @@ -import py - -class TestModule: - def test_module_file_not_found(self, testdir): - tmpdir = testdir.tmpdir - fn = tmpdir.join('nada','no') - col = py.test.collect.Module(fn, config=testdir.Config()) - col.config = testdir.parseconfig(tmpdir) - py.test.raises(py.error.ENOENT, col.collect) - - def test_failing_import(self, testdir): - modcol = testdir.getmodulecol("import alksdjalskdjalkjals") - py.test.raises(ImportError, modcol.collect) - py.test.raises(ImportError, modcol.collect) - py.test.raises(ImportError, modcol.run) - - def test_import_duplicate(self, testdir): - a = testdir.mkdir("a") - b = testdir.mkdir("b") - p = a.ensure("test_whatever.py") - p.pyimport() - del py.std.sys.modules['test_whatever'] - b.ensure("test_whatever.py") - result = testdir.runpytest() - s = result.stdout.str() - assert 'mismatch' in s - assert 'test_whatever' in s - - def test_syntax_error_in_module(self, testdir): - modcol = testdir.getmodulecol("this is a syntax error") - py.test.raises(SyntaxError, modcol.collect) - py.test.raises(SyntaxError, modcol.collect) - py.test.raises(SyntaxError, modcol.run) - - def test_module_considers_pluginmanager_at_import(self, testdir): - modcol = testdir.getmodulecol("pytest_plugins='xasdlkj',") - py.test.raises(ImportError, "modcol.obj") - -class TestClass: - def test_class_with_init_not_collected(self, testdir): - modcol = testdir.getmodulecol(""" - class TestClass1: - def __init__(self): - pass - class TestClass2(object): - def __init__(self): - pass - """) - l = modcol.collect() - assert len(l) == 0 - -if py.std.sys.version_info > (3, 0): - _func_name_attr = "__name__" -else: - _func_name_attr = "func_name" - -class TestGenerator: - def test_generative_functions(self, testdir): - modcol = testdir.getmodulecol(""" - def func1(arg, arg2): - assert arg == arg2 - - def test_gen(): - yield func1, 17, 3*5 - yield func1, 42, 6*7 - """) - colitems = modcol.collect() - assert len(colitems) == 1 - gencol = colitems[0] - assert isinstance(gencol, py.test.collect.Generator) - gencolitems = gencol.collect() - assert len(gencolitems) == 2 - assert isinstance(gencolitems[0], 
py.test.collect.Function) - assert isinstance(gencolitems[1], py.test.collect.Function) - assert gencolitems[0].name == '[0]' - assert getattr(gencolitems[0].obj, _func_name_attr) == 'func1' - - def test_generative_methods(self, testdir): - modcol = testdir.getmodulecol(""" - def func1(arg, arg2): - assert arg == arg2 - class TestGenMethods: - def test_gen(self): - yield func1, 17, 3*5 - yield func1, 42, 6*7 - """) - gencol = modcol.collect()[0].collect()[0].collect()[0] - assert isinstance(gencol, py.test.collect.Generator) - gencolitems = gencol.collect() - assert len(gencolitems) == 2 - assert isinstance(gencolitems[0], py.test.collect.Function) - assert isinstance(gencolitems[1], py.test.collect.Function) - assert gencolitems[0].name == '[0]' - assert getattr(gencolitems[0].obj, _func_name_attr) == 'func1' - - def test_generative_functions_with_explicit_names(self, testdir): - modcol = testdir.getmodulecol(""" - def func1(arg, arg2): - assert arg == arg2 - - def test_gen(): - yield "seventeen", func1, 17, 3*5 - yield "fortytwo", func1, 42, 6*7 - """) - colitems = modcol.collect() - assert len(colitems) == 1 - gencol = colitems[0] - assert isinstance(gencol, py.test.collect.Generator) - gencolitems = gencol.collect() - assert len(gencolitems) == 2 - assert isinstance(gencolitems[0], py.test.collect.Function) - assert isinstance(gencolitems[1], py.test.collect.Function) - assert gencolitems[0].name == "['seventeen']" - assert getattr(gencolitems[0].obj, _func_name_attr) == 'func1' - assert gencolitems[1].name == "['fortytwo']" - assert getattr(gencolitems[1].obj, _func_name_attr) == 'func1' - - def test_generative_functions_unique_explicit_names(self, testdir): - # generative - modcol = testdir.getmodulecol(""" - def func(): pass - def test_gen(): - yield "name", func - yield "name", func - """) - colitems = modcol.collect() - assert len(colitems) == 1 - gencol = colitems[0] - assert isinstance(gencol, py.test.collect.Generator) - py.test.raises(ValueError, "gencol.collect()") - - def test_generative_methods_with_explicit_names(self, testdir): - modcol = testdir.getmodulecol(""" - def func1(arg, arg2): - assert arg == arg2 - class TestGenMethods: - def test_gen(self): - yield "m1", func1, 17, 3*5 - yield "m2", func1, 42, 6*7 - """) - gencol = modcol.collect()[0].collect()[0].collect()[0] - assert isinstance(gencol, py.test.collect.Generator) - gencolitems = gencol.collect() - assert len(gencolitems) == 2 - assert isinstance(gencolitems[0], py.test.collect.Function) - assert isinstance(gencolitems[1], py.test.collect.Function) - assert gencolitems[0].name == "['m1']" - assert getattr(gencolitems[0].obj, _func_name_attr) == 'func1' - assert gencolitems[1].name == "['m2']" - assert getattr(gencolitems[1].obj, _func_name_attr) == 'func1' - - def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir): - o = testdir.makepyfile(""" - def test_generative_order_of_execution(): - import py - test_list = [] - expected_list = list(range(6)) - - def list_append(item): - test_list.append(item) - - def assert_order_of_execution(): - py.builtin.print_('expected order', expected_list) - py.builtin.print_('but got ', test_list) - assert test_list == expected_list - - for i in expected_list: - yield list_append, i - yield assert_order_of_execution - """) - reprec = testdir.inline_run(o) - passed, skipped, failed = reprec.countoutcomes() - assert passed == 7 - assert not skipped and not failed - - def test_order_of_execution_generator_different_codeline(self, testdir): - o = 
testdir.makepyfile(""" - def test_generative_tests_different_codeline(): - import py - test_list = [] - expected_list = list(range(3)) - - def list_append_2(): - test_list.append(2) - - def list_append_1(): - test_list.append(1) - - def list_append_0(): - test_list.append(0) - - def assert_order_of_execution(): - py.builtin.print_('expected order', expected_list) - py.builtin.print_('but got ', test_list) - assert test_list == expected_list - - yield list_append_0 - yield list_append_1 - yield list_append_2 - yield assert_order_of_execution - """) - reprec = testdir.inline_run(o) - passed, skipped, failed = reprec.countoutcomes() - assert passed == 4 - assert not skipped and not failed - -class TestFunction: - def test_getmodulecollector(self, testdir): - item = testdir.getitem("def test_func(): pass") - modcol = item.getparent(py.test.collect.Module) - assert isinstance(modcol, py.test.collect.Module) - assert hasattr(modcol.obj, 'test_func') - - def test_function_equality(self, testdir, tmpdir): - config = testdir.reparseconfig() - f1 = py.test.collect.Function(name="name", config=config, - args=(1,), callobj=isinstance) - f2 = py.test.collect.Function(name="name",config=config, - args=(1,), callobj=py.builtin.callable) - assert not f1 == f2 - assert f1 != f2 - f3 = py.test.collect.Function(name="name", config=config, - args=(1,2), callobj=py.builtin.callable) - assert not f3 == f2 - assert f3 != f2 - - assert not f3 == f1 - assert f3 != f1 - - f1_b = py.test.collect.Function(name="name", config=config, - args=(1,), callobj=isinstance) - assert f1 == f1_b - assert not f1 != f1_b - - def test_function_equality_with_callspec(self, testdir, tmpdir): - config = testdir.reparseconfig() - class callspec1: - param = 1 - funcargs = {} - id = "hello" - class callspec2: - param = 1 - funcargs = {} - id = "world" - f5 = py.test.collect.Function(name="name", config=config, - callspec=callspec1, callobj=isinstance) - f5b = py.test.collect.Function(name="name", config=config, - callspec=callspec2, callobj=isinstance) - assert f5 != f5b - assert not (f5 == f5b) - - def test_pyfunc_call(self, testdir): - item = testdir.getitem("def test_func(): raise ValueError") - config = item.config - class MyPlugin1: - def pytest_pyfunc_call(self, pyfuncitem): - raise ValueError - class MyPlugin2: - def pytest_pyfunc_call(self, pyfuncitem): - return True - config.pluginmanager.register(MyPlugin1()) - config.pluginmanager.register(MyPlugin2()) - config.hook.pytest_pyfunc_call(pyfuncitem=item) - -class TestSorting: - def test_check_equality(self, testdir): - modcol = testdir.getmodulecol(""" - def test_pass(): pass - def test_fail(): assert 0 - """) - fn1 = modcol.collect_by_name("test_pass") - assert isinstance(fn1, py.test.collect.Function) - fn2 = modcol.collect_by_name("test_pass") - assert isinstance(fn2, py.test.collect.Function) - - assert fn1 == fn2 - assert fn1 != modcol - if py.std.sys.version_info < (3, 0): - assert cmp(fn1, fn2) == 0 - assert hash(fn1) == hash(fn2) - - fn3 = modcol.collect_by_name("test_fail") - assert isinstance(fn3, py.test.collect.Function) - assert not (fn1 == fn3) - assert fn1 != fn3 - - for fn in fn1,fn2,fn3: - assert fn != 3 - assert fn != modcol - assert fn != [1,2,3] - assert [1,2,3] != fn - assert modcol != fn - - def test_allow_sane_sorting_for_decorators(self, testdir): - modcol = testdir.getmodulecol(""" - def dec(f): - g = lambda: f(2) - g.place_as = f - return g - - - def test_b(y): - pass - test_b = dec(test_b) - - def test_a(y): - pass - test_a = dec(test_a) - """) - 
colitems = modcol.collect() - assert len(colitems) == 2 - assert [item.name for item in colitems] == ['test_b', 'test_a'] - - -class TestConftestCustomization: - def test_pytest_pycollect_makeitem(self, testdir): - testdir.makeconftest(""" - import py - class MyFunction(py.test.collect.Function): - pass - def pytest_pycollect_makeitem(collector, name, obj): - if name == "some": - return MyFunction(name, collector) - """) - testdir.makepyfile("def some(): pass") - result = testdir.runpytest("--collectonly") - result.stdout.fnmatch_lines([ - "*MyFunction*some*", - ]) - - def test_makeitem_non_underscore(self, testdir, monkeypatch): - modcol = testdir.getmodulecol("def _hello(): pass") - l = [] - monkeypatch.setattr(py.test.collect.Module, 'makeitem', - lambda self, name, obj: l.append(name)) - modcol._buildname2items() - assert '_hello' not in l - - -class TestReportinfo: - - def test_func_reportinfo(self, testdir): - item = testdir.getitem("def test_func(): pass") - fspath, lineno, modpath = item.reportinfo() - assert fspath == item.fspath - assert lineno == 0 - assert modpath == "test_func" - - def test_class_reportinfo(self, testdir): - modcol = testdir.getmodulecol(""" - # lineno 0 - class TestClass: - def test_hello(self): pass - """) - classcol = modcol.collect_by_name("TestClass") - fspath, lineno, msg = classcol.reportinfo() - assert fspath == modcol.fspath - assert lineno == 1 - assert msg == "TestClass" - - def test_generator_reportinfo(self, testdir): - modcol = testdir.getmodulecol(""" - # lineno 0 - def test_gen(): - def check(x): - assert x - yield check, 3 - """) - gencol = modcol.collect_by_name("test_gen") - fspath, lineno, modpath = gencol.reportinfo() - assert fspath == modcol.fspath - assert lineno == 1 - assert modpath == "test_gen" - - genitem = gencol.collect()[0] - fspath, lineno, modpath = genitem.reportinfo() - assert fspath == modcol.fspath - assert lineno == 2 - assert modpath == "test_gen[0]" - """ - def test_func(): - pass - def test_genfunc(): - def check(x): - pass - yield check, 3 - class TestClass: - def test_method(self): - pass - """ - -def test_setup_only_available_in_subdir(testdir): - sub1 = testdir.mkpydir("sub1") - sub2 = testdir.mkpydir("sub2") - sub1.join("conftest.py").write(py.code.Source(""" - import py - def pytest_runtest_setup(item): - assert item.fspath.purebasename == "test_in_sub1" - def pytest_runtest_call(item): - assert item.fspath.purebasename == "test_in_sub1" - def pytest_runtest_teardown(item): - assert item.fspath.purebasename == "test_in_sub1" - """)) - sub2.join("conftest.py").write(py.code.Source(""" - import py - def pytest_runtest_setup(item): - assert item.fspath.purebasename == "test_in_sub2" - def pytest_runtest_call(item): - assert item.fspath.purebasename == "test_in_sub2" - def pytest_runtest_teardown(item): - assert item.fspath.purebasename == "test_in_sub2" - """)) - sub1.join("test_in_sub1.py").write("def test_1(): pass") - sub2.join("test_in_sub2.py").write("def test_2(): pass") - result = testdir.runpytest("-v", "-s") - result.stdout.fnmatch_lines([ - "*2 passed*" - ]) - -def test_generate_tests_only_done_in_subdir(testdir): - sub1 = testdir.mkpydir("sub1") - sub2 = testdir.mkpydir("sub2") - sub1.join("conftest.py").write(py.code.Source(""" - def pytest_generate_tests(metafunc): - assert metafunc.function.__name__ == "test_1" - """)) - sub2.join("conftest.py").write(py.code.Source(""" - def pytest_generate_tests(metafunc): - assert metafunc.function.__name__ == "test_2" - """)) - 
sub1.join("test_in_sub1.py").write("def test_1(): pass") - sub2.join("test_in_sub2.py").write("def test_2(): pass") - result = testdir.runpytest("-v", "-s", sub1, sub2, sub1) - result.stdout.fnmatch_lines([ - "*3 passed*" - ]) - -def test_modulecol_roundtrip(testdir): - modcol = testdir.getmodulecol("pass", withinit=True) - trail = modcol.config._rootcol.totrail(modcol) - newcol = modcol.config._rootcol.fromtrail(trail) - assert modcol.name == newcol.name --- /dev/null +++ b/testing/root/test_builtin.py @@ -0,0 +1,148 @@ +import sys +import py +from py.builtin import set, frozenset, reversed, sorted + +def test_enumerate(): + l = [0,1,2] + for i,x in enumerate(l): + assert i == x + +def test_BaseException(): + assert issubclass(IndexError, py.builtin.BaseException) + assert issubclass(Exception, py.builtin.BaseException) + assert issubclass(KeyboardInterrupt, py.builtin.BaseException) + + class MyRandomClass(object): + pass + assert not issubclass(MyRandomClass, py.builtin.BaseException) + + assert py.builtin.BaseException.__module__ in ('exceptions', 'builtins') + assert Exception.__name__ == 'Exception' + + +def test_GeneratorExit(): + assert py.builtin.GeneratorExit.__module__ in ('exceptions', 'builtins') + assert issubclass(py.builtin.GeneratorExit, py.builtin.BaseException) + +def test_reversed(): + reversed = py.builtin.reversed + r = reversed("hello") + assert iter(r) is r + s = "".join(list(r)) + assert s == "olleh" + assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o'] + py.test.raises(TypeError, reversed, reversed("hello")) + +def test_simple(): + s = set([1, 2, 3, 4]) + assert s == set([3, 4, 2, 1]) + s1 = s.union(set([5, 6])) + assert 5 in s1 + assert 1 in s1 + +def test_frozenset(): + s = set([frozenset([0, 1]), frozenset([1, 0])]) + assert len(s) == 1 + +def test_sorted(): + if sorted == py.builtin.sorted: + return # don't test a real builtin + for s in [py.builtin.sorted]: + def test(): + assert s([3, 2, 1]) == [1, 2, 3] + assert s([1, 2, 3], reverse=True) == [3, 2, 1] + l = s([1, 2, 3, 4, 5, 6], key=lambda x: x % 2) + assert l == [2, 4, 6, 1, 3, 5] + l = s([1, 2, 3, 4], cmp=lambda x, y: -cmp(x, y)) + assert l == [4, 3, 2, 1] + l = s([1, 2, 3, 4], cmp=lambda x, y: -cmp(x, y), + key=lambda x: x % 2) + assert l == [1, 3, 2, 4] + + def compare(x, y): + assert type(x) == str + assert type(y) == str + return cmp(x, y) + data = 'The quick Brown fox Jumped over The lazy Dog'.split() + s(data, cmp=compare, key=str.lower) + yield test + + +def test_print_simple(): + from py.builtin import print_ + py.test.raises(TypeError, "print_(hello=3)") + f = py.io.TextIO() + print_("hello", "world", file=f) + s = f.getvalue() + assert s == "hello world\n" + + f = py.io.TextIO() + print_("hello", end="", file=f) + s = f.getvalue() + assert s == "hello" + + f = py.io.TextIO() + print_("xyz", "abc", sep="", end="", file=f) + s = f.getvalue() + assert s == "xyzabc" + + class X: + def __repr__(self): return "rep" + f = py.io.TextIO() + print_(X(), file=f) + assert f.getvalue() == "rep\n" + +def test_execfile(tmpdir): + test_file = tmpdir.join("test.py") + test_file.write("x = y\ndef f(): pass") + ns = {"y" : 42} + py.builtin.execfile(str(test_file), ns) + assert ns["x"] == 42 + assert py.code.getrawcode(ns["f"]).co_filename == str(test_file) + class A: + y = 3 + x = 4 + py.builtin.execfile(str(test_file)) + assert A.x == 3 + +def test_getfuncdict(): + def f(): + pass + f.x = 4 + assert py.builtin._getfuncdict(f)["x"] == 4 + assert py.builtin._getfuncdict(2) is None + +def 
test_callable(): + class A: pass + assert py.builtin.callable(test_callable) + assert py.builtin.callable(A) + assert py.builtin.callable(list) + assert py.builtin.callable(id) + assert not py.builtin.callable(4) + assert not py.builtin.callable("hi") + +def test_totext(): + py.builtin._totext("hello", "UTF-8") + +def test_reraise(): + from py.builtin import _reraise + try: + raise Exception() + except Exception: + cls, val, tb = sys.exc_info() + excinfo = py.test.raises(Exception, "_reraise(cls, val, tb)") + +def test_exec(): + l = [] + py.builtin.exec_("l.append(1)") + assert l == [1] + d = {} + py.builtin.exec_("x=4", d) + assert d['x'] == 4 + +def test_tryimport(): + py.test.raises(ImportError, py.builtin._tryimport, 'xqwe123') + x = py.builtin._tryimport('asldkajsdl', 'py') + assert x == py + x = py.builtin._tryimport('asldkajsdl', 'py.path') + assert x == py.path --- /dev/null +++ b/testing/test_config.py @@ -0,0 +1,248 @@ +import py +from py.impl.test.collect import RootCollector + + +class TestConfigCmdlineParsing: + def test_parser_addoption_default_env(self, testdir, monkeypatch): + import os + config = testdir.Config() + group = config._parser.getgroup("hello") + + monkeypatch.setitem(os.environ, 'PYTEST_OPTION_OPTION1', 'True') + group.addoption("--option1", action="store_true") + assert group.options[0].default == True + + monkeypatch.setitem(os.environ, 'PYTEST_OPTION_OPTION2', 'abc') + group.addoption("--option2", action="store", default="x") + assert group.options[1].default == "abc" + + monkeypatch.setitem(os.environ, 'PYTEST_OPTION_OPTION3', '32') + group.addoption("--option3", action="store", type="int") + assert group.options[2].default == 32 + + group.addoption("--option4", action="store", type="int") + assert group.options[3].default == ("NO", "DEFAULT") + + def test_parser_addoption_default_conftest(self, testdir, monkeypatch): + import os + testdir.makeconftest("option_verbose=True") + config = testdir.parseconfig() + assert config.option.verbose + + def test_parsing_again_fails(self, testdir): + config = testdir.reparseconfig([testdir.tmpdir]) + py.test.raises(AssertionError, "config.parse([])") + + +class TestConfigTmpdir: + def test_getbasetemp(self, testdir): + config = testdir.Config() + config.basetemp = "hello" + config.getbasetemp() == "hello" + + def test_mktemp(self, testdir): + config = testdir.Config() + config.basetemp = testdir.mkdir("hello") + tmp = config.mktemp("world") + assert tmp.relto(config.basetemp) == "world" + tmp = config.mktemp("this", numbered=True) + assert tmp.relto(config.basetemp).startswith("this") + tmp2 = config.mktemp("this", numbered=True) + assert tmp2.relto(config.basetemp).startswith("this") + assert tmp2 != tmp + + def test_reparse(self, testdir): + config2 = testdir.reparseconfig([]) + config3 = testdir.reparseconfig([]) + assert config2.getbasetemp() != config3.getbasetemp() + assert not config2.getbasetemp().relto(config3.getbasetemp()) + assert not config3.getbasetemp().relto(config2.getbasetemp()) + +class TestConfigAPI: + + def test_config_getvalue_honours_conftest(self, testdir): + testdir.makepyfile(conftest="x=1") + testdir.mkdir("sub").join("conftest.py").write("x=2 ; y = 3") + config = testdir.parseconfig() + o = testdir.tmpdir + assert config.getvalue("x") == 1 + assert config.getvalue("x", o.join('sub')) == 2 + py.test.raises(KeyError, "config.getvalue('y')") + config = testdir.reparseconfig([str(o.join('sub'))]) + assert config.getvalue("x") == 2 + assert config.getvalue("y") == 3 + assert config.getvalue("x", 
o) == 1 + py.test.raises(KeyError, 'config.getvalue("y", o)') + + def test_config_getvalueorskip(self, testdir): + from py.impl.test.outcome import Skipped + config = testdir.parseconfig() + py.test.raises(Skipped, "config.getvalueorskip('hello')") + verbose = config.getvalueorskip("verbose") + assert verbose == config.option.verbose + config.option.hello = None + py.test.raises(Skipped, "config.getvalueorskip('hello')") + + def test_config_overwrite(self, testdir): + o = testdir.tmpdir + o.ensure("conftest.py").write("x=1") + config = testdir.reparseconfig([str(o)]) + assert config.getvalue('x') == 1 + config.option.x = 2 + assert config.getvalue('x') == 2 + config = testdir.reparseconfig([str(o)]) + assert config.getvalue('x') == 1 + + def test_getconftest_pathlist(self, testdir, tmpdir): + somepath = tmpdir.join("x", "y", "z") + p = tmpdir.join("conftest.py") + p.write("pathlist = ['.', %r]" % str(somepath)) + config = testdir.reparseconfig([p]) + assert config.getconftest_pathlist('notexist') is None + pl = config.getconftest_pathlist('pathlist') + print(pl) + assert len(pl) == 2 + assert pl[0] == tmpdir + assert pl[1] == somepath + + def test_setsessionclass_and_initsession(self, testdir): + config = testdir.Config() + class Session1: + def __init__(self, config): + self.config = config + config.setsessionclass(Session1) + session = config.initsession() + assert isinstance(session, Session1) + assert session.config is config + py.test.raises(ValueError, "config.setsessionclass(Session1)") + + +class TestConfigApi_getinitialnodes: + def test_onedir(self, testdir): + config = testdir.reparseconfig([testdir.tmpdir]) + colitems = config.getinitialnodes() + assert len(colitems) == 1 + col = colitems[0] + assert isinstance(col, py.test.collect.Directory) + for col in col.listchain(): + assert col.config is config + + def test_twodirs(self, testdir, tmpdir): + config = testdir.reparseconfig([tmpdir, tmpdir]) + colitems = config.getinitialnodes() + assert len(colitems) == 2 + col1, col2 = colitems + assert col1.name == col2.name + assert col1.parent == col2.parent + + def test_curdir_and_subdir(self, testdir, tmpdir): + a = tmpdir.ensure("a", dir=1) + config = testdir.reparseconfig([tmpdir, a]) + colitems = config.getinitialnodes() + assert len(colitems) == 2 + col1, col2 = colitems + assert col1.name == tmpdir.basename + assert col2.name == 'a' + for col in colitems: + for subcol in col.listchain(): + assert col.config is config + + def test_global_file(self, testdir, tmpdir): + x = tmpdir.ensure("x.py") + config = testdir.reparseconfig([x]) + col, = config.getinitialnodes() + assert isinstance(col, py.test.collect.Module) + assert col.name == 'x.py' + assert col.parent.name == tmpdir.basename + assert isinstance(col.parent.parent, RootCollector) + for col in col.listchain(): + assert col.config is config + + def test_global_dir(self, testdir, tmpdir): + x = tmpdir.ensure("a", dir=1) + config = testdir.reparseconfig([x]) + col, = config.getinitialnodes() + assert isinstance(col, py.test.collect.Directory) + print(col.listchain()) + assert col.name == 'a' + assert isinstance(col.parent, RootCollector) + assert col.config is config + + def test_pkgfile(self, testdir, tmpdir): + x = tmpdir.ensure("x.py") + tmpdir.ensure("__init__.py") + config = testdir.reparseconfig([x]) + col, = config.getinitialnodes() + assert isinstance(col, py.test.collect.Module) + assert col.name == 'x.py' + assert col.parent.name == x.dirpath().basename + assert isinstance(col.parent.parent.parent, RootCollector) + for 
col in col.listchain(): + assert col.config is config + +class TestConfig_gettopdir: + def test_gettopdir(self, testdir): + from py.impl.test.config import gettopdir + tmp = testdir.tmpdir + assert gettopdir([tmp]) == tmp + topdir = gettopdir([tmp.join("hello"), tmp.join("world")]) + assert topdir == tmp + somefile = tmp.ensure("somefile.py") + assert gettopdir([somefile]) == tmp + + def test_gettopdir_pypkg(self, testdir): + from py.impl.test.config import gettopdir + tmp = testdir.tmpdir + a = tmp.ensure('a', dir=1) + b = tmp.ensure('a', 'b', '__init__.py') + c = tmp.ensure('a', 'b', 'c.py') + Z = tmp.ensure('Z', dir=1) + assert gettopdir([c]) == a + assert gettopdir([c, Z]) == tmp + assert gettopdir(["%s::xyc" % c]) == a + assert gettopdir(["%s::xyc::abc" % c]) == a + assert gettopdir(["%s::xyc" % c, "%s::abc" % Z]) == tmp + +def test_options_on_small_file_do_not_blow_up(testdir): + def runfiletest(opts): + reprec = testdir.inline_run(*opts) + passed, skipped, failed = reprec.countoutcomes() + assert failed == 2 + assert skipped == passed == 0 + path = testdir.makepyfile(""" + def test_f1(): assert 0 + def test_f2(): assert 0 + """) + + for opts in ([], ['-l'], ['-s'], ['--tb=no'], ['--tb=short'], + ['--tb=long'], ['--fulltrace'], ['--nomagic'], + ['--traceconfig'], ['-v'], ['-v', '-v']): + runfiletest(opts + [path]) + +def test_ensuretemp(recwarn): + #py.test.deprecated_call(py.test.ensuretemp, 'hello') + d1 = py.test.ensuretemp('hello') + d2 = py.test.ensuretemp('hello') + assert d1 == d2 + assert d1.check(dir=1) + +def test_preparse_ordering(testdir, monkeypatch): + pkg_resources = py.test.importorskip("pkg_resources") + def my_iter(name): + assert name == "pytest11" + class EntryPoint: + name = "mytestplugin" + def load(self): + class PseudoPlugin: + x = 42 + return PseudoPlugin() + return iter([EntryPoint()]) + monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) + testdir.makeconftest(""" + pytest_plugins = "mytestplugin", + """) + monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin") + config = testdir.parseconfig() + plugin = config.pluginmanager.getplugin("mytestplugin") + assert plugin.x == 42 + --- /dev/null +++ b/testing/conftest.py @@ -0,0 +1,3 @@ + +pytest_plugins = "skipping", "pytester", "tmpdir" + --- a/testing/pytest/test_deprecated_api.py +++ /dev/null @@ -1,344 +0,0 @@ - -import py -from py.impl.test.outcome import Skipped - -class TestCollectDeprecated: - - def test_collect_with_deprecated_run_and_join(self, testdir, recwarn): - testdir.makeconftest(""" - import py - - class MyInstance(py.test.collect.Instance): - def run(self): - return ['check2'] - def join(self, name): - if name == 'check2': - return self.Function(name=name, parent=self) - - class MyClass(py.test.collect.Class): - def run(self): - return ['check2'] - def join(self, name): - return MyInstance(name='i', parent=self) - - class MyModule(py.test.collect.Module): - def run(self): - return ['check', 'Cls'] - def join(self, name): - if name == 'check': - return self.Function(name, parent=self) - if name == 'Cls': - return MyClass(name, parent=self) - - class MyDirectory(py.test.collect.Directory): - Module = MyModule - def run(self): - return ['somefile.py'] - def join(self, name): - if name == "somefile.py": - return self.Module(self.fspath.join(name), parent=self) - - def pytest_collect_directory(path, parent): - if path.basename == "subconf": - return MyDirectory(path, parent) - """) - subconf = testdir.mkpydir("subconf") - somefile = subconf.join("somefile.py") - 
somefile.write(py.code.Source(""" - def check(): pass - class Cls: - def check2(self): pass - """)) - config = testdir.parseconfig(somefile) - dirnode = config.getnode(somefile.dirpath()) - colitems = dirnode.collect() - w = recwarn.pop(DeprecationWarning) - assert w.filename.find("conftest.py") != -1 - #recwarn.resetregistry() - #assert 0, (w.message, w.filename, w.lineno) - assert len(colitems) == 1 - modcol = colitems[0] - assert modcol.name == "somefile.py" - colitems = modcol.collect() - recwarn.pop(DeprecationWarning) - assert len(colitems) == 2 - assert colitems[0].name == 'check' - assert colitems[1].name == 'Cls' - clscol = colitems[1] - - colitems = clscol.collect() - recwarn.pop(DeprecationWarning) - assert len(colitems) == 1 - icol = colitems[0] - colitems = icol.collect() - recwarn.pop(DeprecationWarning) - assert len(colitems) == 1 - assert colitems[0].name == 'check2' - - def test_collect_with_deprecated_join_but_no_run(self, testdir, recwarn): - testdir.makepyfile(conftest=""" - import py - - class Module(py.test.collect.Module): - def funcnamefilter(self, name): - if name.startswith("check_"): - return True - return super(Module, self).funcnamefilter(name) - def join(self, name): - if name.startswith("check_"): - return self.Function(name, parent=self) - assert name != "SomeClass", "join should not be called with this name" - """) - col = testdir.getmodulecol(""" - def somefunc(): pass - def check_one(): pass - class SomeClass: pass - """) - colitems = col.collect() - recwarn.pop(DeprecationWarning) - assert len(colitems) == 1 - funcitem = colitems[0] - assert funcitem.name == "check_one" - - def test_function_custom_run(self, testdir, recwarn): - testdir.makepyfile(conftest=""" - import py - class Function(py.test.collect.Function): - def run(self): - pass - """) - modcol = testdir.getmodulecol("def test_func(): pass") - funcitem = modcol.collect()[0] - assert funcitem.name == 'test_func' - recwarn.clear() - funcitem._deprecated_testexecution() - recwarn.pop(DeprecationWarning) - - def test_function_custom_execute(self, testdir, recwarn): - testdir.makepyfile(conftest=""" - import py - - class MyFunction(py.test.collect.Function): - def execute(self, obj, *args): - pass - Function=MyFunction - """) - modcol = testdir.getmodulecol("def test_func2(): pass") - funcitem = modcol.collect()[0] - w = recwarn.pop(DeprecationWarning) # for defining conftest.Function - assert funcitem.name == 'test_func2' - funcitem._deprecated_testexecution() - w = recwarn.pop(DeprecationWarning) - assert w.filename.find("conftest.py") != -1 - - def test_function_deprecated_run_execute(self, testdir, recwarn): - testdir.makepyfile(conftest=""" - import py - - class Function(py.test.collect.Function): - - def run(self): - pass - """) - modcol = testdir.getmodulecol("def test_some2(): pass") - funcitem = modcol.collect()[0] - w = recwarn.pop(DeprecationWarning) - assert "conftest.py" in str(w.message) - - recwarn.clear() - funcitem._deprecated_testexecution() - recwarn.pop(DeprecationWarning) - - def test_function_deprecated_run_recursive(self, testdir): - testdir.makepyfile(conftest=""" - import py - class Module(py.test.collect.Module): - def run(self): - return super(Module, self).run() - """) - modcol = testdir.getmodulecol("def test_some(): pass") - colitems = py.test.deprecated_call(modcol.collect) - funcitem = colitems[0] - - def test_conftest_subclasses_Module_with_non_pyfile(self, testdir): - testdir.makepyfile(conftest=""" - import py - class Module(py.test.collect.Module): - def 
run(self): - return [] - class Directory(py.test.collect.Directory): - def consider_file(self, path): - if path.basename == "testme.xxx": - return Module(path, parent=self) - return super(Directory, self).consider_file(path) - """) - testme = testdir.makefile('xxx', testme="hello") - config = testdir.parseconfig(testme) - col = config.getnode(testme) - assert col.collect() == [] - - - -class TestDisabled: - def test_disabled_module(self, recwarn, testdir): - modcol = testdir.getmodulecol(""" - disabled = True - def setup_module(mod): - raise ValueError - def test_method(): - pass - """) - l = modcol.collect() - assert len(l) == 1 - recwarn.clear() - py.test.raises(Skipped, "modcol.setup()") - recwarn.pop(DeprecationWarning) - - def test_disabled_class(self, recwarn, testdir): - modcol = testdir.getmodulecol(""" - class TestClass: - disabled = True - def test_method(self): - pass - """) - l = modcol.collect() - assert len(l) == 1 - modcol = l[0] - assert isinstance(modcol, py.test.collect.Class) - l = modcol.collect() - assert len(l) == 1 - recwarn.clear() - py.test.raises(Skipped, "modcol.setup()") - recwarn.pop(DeprecationWarning) - - def test_disabled_class_functional(self, testdir): - reprec = testdir.inline_runsource(""" - class TestSimpleClassSetup: - disabled = True - def test_classlevel(self): pass - def test_classlevel2(self): pass - """) - reprec.assertoutcome(skipped=2) - - @py.test.mark.multi(name="Directory Module Class Function".split()) - def test_function_deprecated_run_execute(self, name, testdir, recwarn): - testdir.makeconftest(""" - import py - class %s(py.test.collect.%s): - pass - """ % (name, name)) - p = testdir.makepyfile(""" - class TestClass: - def test_method(self): - pass - def test_function(): - pass - """) - config = testdir.parseconfig() - if name == "Directory": - config.getnode(testdir.tmpdir) - elif name in ("Module", "File"): - config.getnode(p) - else: - fnode = config.getnode(p) - recwarn.clear() - fnode.collect() - w = recwarn.pop(DeprecationWarning) - assert "conftest.py" in str(w.message) - -def test_config_cmdline_options(recwarn, testdir): - testdir.makepyfile(conftest=""" - import py - def _callback(option, opt_str, value, parser, *args, **kwargs): - option.tdest = True - Option = py.test.config.Option - option = py.test.config.addoptions("testing group", - Option('-G', '--glong', action="store", default=42, - type="int", dest="gdest", help="g value."), - # XXX note: special case, option without a destination - Option('-T', '--tlong', action="callback", callback=_callback, - help='t value'), - ) - """) - recwarn.clear() - config = testdir.reparseconfig(['-G', '17']) - recwarn.pop(DeprecationWarning) - assert config.option.gdest == 17 - -def test_conftest_non_python_items(recwarn, testdir): - testdir.makepyfile(conftest=""" - import py - class CustomItem(py.test.collect.Item): - def run(self): - pass - class Directory(py.test.collect.Directory): - def consider_file(self, fspath): - if fspath.ext == ".xxx": - return CustomItem(fspath.basename, parent=self) - """) - checkfile = testdir.makefile(ext="xxx", hello="world") - testdir.makepyfile(x="") - testdir.maketxtfile(x="") - config = testdir.parseconfig() - recwarn.clear() - dircol = config.getnode(checkfile.dirpath()) - w = recwarn.pop(DeprecationWarning) - assert str(w.message).find("conftest.py") != -1 - colitems = dircol.collect() - assert len(colitems) == 1 - assert colitems[0].name == "hello.xxx" - assert colitems[0].__class__.__name__ == "CustomItem" - - item = config.getnode(checkfile) - 
assert item.name == "hello.xxx" - assert item.__class__.__name__ == "CustomItem" - -def test_extra_python_files_and_functions(testdir): - testdir.makepyfile(conftest=""" - import py - class MyFunction(py.test.collect.Function): - pass - class Directory(py.test.collect.Directory): - def consider_file(self, path): - if path.check(fnmatch="check_*.py"): - return self.Module(path, parent=self) - return super(Directory, self).consider_file(path) - class myfuncmixin: - Function = MyFunction - def funcnamefilter(self, name): - return name.startswith('check_') - class Module(myfuncmixin, py.test.collect.Module): - def classnamefilter(self, name): - return name.startswith('CustomTestClass') - class Instance(myfuncmixin, py.test.collect.Instance): - pass - """) - checkfile = testdir.makepyfile(check_file=""" - def check_func(): - assert 42 == 42 - class CustomTestClass: - def check_method(self): - assert 23 == 23 - """) - # check that directory collects "check_" files - config = testdir.parseconfig() - col = config.getnode(checkfile.dirpath()) - colitems = col.collect() - assert len(colitems) == 1 - assert isinstance(colitems[0], py.test.collect.Module) - - # check that module collects "check_" functions and methods - config = testdir.parseconfig(checkfile) - col = config.getnode(checkfile) - assert isinstance(col, py.test.collect.Module) - colitems = col.collect() - assert len(colitems) == 2 - funccol = colitems[0] - assert isinstance(funccol, py.test.collect.Function) - assert funccol.name == "check_func" - clscol = colitems[1] - assert isinstance(clscol, py.test.collect.Class) - colitems = clscol.collect()[0].collect() - assert len(colitems) == 1 - assert colitems[0].name == "check_method" - --- a/testing/pytest/__init__.py +++ /dev/null @@ -1,1 +0,0 @@ -# --- /dev/null +++ b/testing/acceptance_test.py @@ -0,0 +1,91 @@ +import sys, py + +class TestGeneralUsage: + def test_config_error(self, testdir): + testdir.makeconftest(""" + def pytest_configure(config): + raise config.Error("hello") + """) + result = testdir.runpytest(testdir.tmpdir) + assert result.ret != 0 + assert result.stderr.fnmatch_lines([ + '*ERROR: hello' + ]) + + def test_config_preparse_plugin_option(self, testdir): + testdir.makepyfile(pytest_xyz=""" + def pytest_addoption(parser): + parser.addoption("--xyz", dest="xyz", action="store") + """) + testdir.makepyfile(test_one=""" + import py + def test_option(): + assert py.test.config.option.xyz == "123" + """) + result = testdir.runpytest("-p", "xyz", "--xyz=123") + assert result.ret == 0 + assert result.stdout.fnmatch_lines([ + '*1 passed*', + ]) + + def test_basetemp(self, testdir): + mytemp = testdir.tmpdir.mkdir("mytemp") + p = testdir.makepyfile(""" + import py + def test_1(pytestconfig): + pytestconfig.getbasetemp().ensure("hello") + """) + result = testdir.runpytest(p, '--basetemp=%s' %mytemp) + assert result.ret == 0 + assert mytemp.join('hello').check() + + def test_assertion_magic(self, testdir): + p = testdir.makepyfile(""" + def test_this(): + x = 0 + assert x + """) + result = testdir.runpytest(p) + extra = result.stdout.fnmatch_lines([ + "> assert x", + "E assert 0", + ]) + assert result.ret == 1 + + def test_nested_import_error(self, testdir): + p = testdir.makepyfile(""" + import import_fails + def test_this(): + assert import_fails.a == 1 + """) + testdir.makepyfile(import_fails="import does_not_work") + result = testdir.runpytest(p) + extra = result.stdout.fnmatch_lines([ + #XXX on jython this fails: "> import import_fails", + "E ImportError: No module named 
does_not_work", + ]) + assert result.ret == 1 + + def test_not_collectable_arguments(self, testdir): + p1 = testdir.makepyfile("") + p2 = testdir.makefile(".pyc", "123") + result = testdir.runpytest(p1, p2) + assert result.ret != 0 + assert result.stderr.fnmatch_lines([ + "*ERROR: can't collect: %s" %(p2,) + ]) + + + def test_earlyinit(self, testdir): + p = testdir.makepyfile(""" + import py + assert hasattr(py.test, 'mark') + """) + result = testdir.runpython(p) + assert result.ret == 0 + + def test_pydoc(self, testdir): + result = testdir.runpython_c("import py ; help(py.test)") + assert result.ret == 0 + s = result.stdout.str() + assert 'MarkGenerator' in s --- /dev/null +++ b/testing/test_pycollect.py @@ -0,0 +1,470 @@ +import py + +class TestModule: + def test_module_file_not_found(self, testdir): + tmpdir = testdir.tmpdir + fn = tmpdir.join('nada','no') + col = py.test.collect.Module(fn, config=testdir.Config()) + col.config = testdir.parseconfig(tmpdir) + py.test.raises(py.error.ENOENT, col.collect) + + def test_failing_import(self, testdir): + modcol = testdir.getmodulecol("import alksdjalskdjalkjals") + py.test.raises(ImportError, modcol.collect) + py.test.raises(ImportError, modcol.collect) + py.test.raises(ImportError, modcol.run) + + def test_import_duplicate(self, testdir): + a = testdir.mkdir("a") + b = testdir.mkdir("b") + p = a.ensure("test_whatever.py") + p.pyimport() + del py.std.sys.modules['test_whatever'] + b.ensure("test_whatever.py") + result = testdir.runpytest() + s = result.stdout.str() + assert 'mismatch' in s + assert 'test_whatever' in s + + def test_syntax_error_in_module(self, testdir): + modcol = testdir.getmodulecol("this is a syntax error") + py.test.raises(SyntaxError, modcol.collect) + py.test.raises(SyntaxError, modcol.collect) + py.test.raises(SyntaxError, modcol.run) + + def test_module_considers_pluginmanager_at_import(self, testdir): + modcol = testdir.getmodulecol("pytest_plugins='xasdlkj',") + py.test.raises(ImportError, "modcol.obj") + +class TestClass: + def test_class_with_init_not_collected(self, testdir): + modcol = testdir.getmodulecol(""" + class TestClass1: + def __init__(self): + pass + class TestClass2(object): + def __init__(self): + pass + """) + l = modcol.collect() + assert len(l) == 0 + +if py.std.sys.version_info > (3, 0): + _func_name_attr = "__name__" +else: + _func_name_attr = "func_name" + +class TestGenerator: + def test_generative_functions(self, testdir): + modcol = testdir.getmodulecol(""" + def func1(arg, arg2): + assert arg == arg2 + + def test_gen(): + yield func1, 17, 3*5 + yield func1, 42, 6*7 + """) + colitems = modcol.collect() + assert len(colitems) == 1 + gencol = colitems[0] + assert isinstance(gencol, py.test.collect.Generator) + gencolitems = gencol.collect() + assert len(gencolitems) == 2 + assert isinstance(gencolitems[0], py.test.collect.Function) + assert isinstance(gencolitems[1], py.test.collect.Function) + assert gencolitems[0].name == '[0]' + assert getattr(gencolitems[0].obj, _func_name_attr) == 'func1' + + def test_generative_methods(self, testdir): + modcol = testdir.getmodulecol(""" + def func1(arg, arg2): + assert arg == arg2 + class TestGenMethods: + def test_gen(self): + yield func1, 17, 3*5 + yield func1, 42, 6*7 + """) + gencol = modcol.collect()[0].collect()[0].collect()[0] + assert isinstance(gencol, py.test.collect.Generator) + gencolitems = gencol.collect() + assert len(gencolitems) == 2 + assert isinstance(gencolitems[0], py.test.collect.Function) + assert isinstance(gencolitems[1], 
py.test.collect.Function) + assert gencolitems[0].name == '[0]' + assert getattr(gencolitems[0].obj, _func_name_attr) == 'func1' + + def test_generative_functions_with_explicit_names(self, testdir): + modcol = testdir.getmodulecol(""" + def func1(arg, arg2): + assert arg == arg2 + + def test_gen(): + yield "seventeen", func1, 17, 3*5 + yield "fortytwo", func1, 42, 6*7 + """) + colitems = modcol.collect() + assert len(colitems) == 1 + gencol = colitems[0] + assert isinstance(gencol, py.test.collect.Generator) + gencolitems = gencol.collect() + assert len(gencolitems) == 2 + assert isinstance(gencolitems[0], py.test.collect.Function) + assert isinstance(gencolitems[1], py.test.collect.Function) + assert gencolitems[0].name == "['seventeen']" + assert getattr(gencolitems[0].obj, _func_name_attr) == 'func1' + assert gencolitems[1].name == "['fortytwo']" + assert getattr(gencolitems[1].obj, _func_name_attr) == 'func1' + + def test_generative_functions_unique_explicit_names(self, testdir): + # generative + modcol = testdir.getmodulecol(""" + def func(): pass + def test_gen(): + yield "name", func + yield "name", func + """) + colitems = modcol.collect() + assert len(colitems) == 1 + gencol = colitems[0] + assert isinstance(gencol, py.test.collect.Generator) + py.test.raises(ValueError, "gencol.collect()") + + def test_generative_methods_with_explicit_names(self, testdir): + modcol = testdir.getmodulecol(""" + def func1(arg, arg2): + assert arg == arg2 + class TestGenMethods: + def test_gen(self): + yield "m1", func1, 17, 3*5 + yield "m2", func1, 42, 6*7 + """) + gencol = modcol.collect()[0].collect()[0].collect()[0] + assert isinstance(gencol, py.test.collect.Generator) + gencolitems = gencol.collect() + assert len(gencolitems) == 2 + assert isinstance(gencolitems[0], py.test.collect.Function) + assert isinstance(gencolitems[1], py.test.collect.Function) + assert gencolitems[0].name == "['m1']" + assert getattr(gencolitems[0].obj, _func_name_attr) == 'func1' + assert gencolitems[1].name == "['m2']" + assert getattr(gencolitems[1].obj, _func_name_attr) == 'func1' + + def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir): + o = testdir.makepyfile(""" + def test_generative_order_of_execution(): + import py + test_list = [] + expected_list = list(range(6)) + + def list_append(item): + test_list.append(item) + + def assert_order_of_execution(): + py.builtin.print_('expected order', expected_list) + py.builtin.print_('but got ', test_list) + assert test_list == expected_list + + for i in expected_list: + yield list_append, i + yield assert_order_of_execution + """) + reprec = testdir.inline_run(o) + passed, skipped, failed = reprec.countoutcomes() + assert passed == 7 + assert not skipped and not failed + + def test_order_of_execution_generator_different_codeline(self, testdir): + o = testdir.makepyfile(""" + def test_generative_tests_different_codeline(): + import py + test_list = [] + expected_list = list(range(3)) + + def list_append_2(): + test_list.append(2) + + def list_append_1(): + test_list.append(1) + + def list_append_0(): + test_list.append(0) + + def assert_order_of_execution(): + py.builtin.print_('expected order', expected_list) + py.builtin.print_('but got ', test_list) + assert test_list == expected_list + + yield list_append_0 + yield list_append_1 + yield list_append_2 + yield assert_order_of_execution + """) + reprec = testdir.inline_run(o) + passed, skipped, failed = reprec.countoutcomes() + assert passed == 4 + assert not skipped and not failed + +class 
TestFunction: + def test_getmodulecollector(self, testdir): + item = testdir.getitem("def test_func(): pass") + modcol = item.getparent(py.test.collect.Module) + assert isinstance(modcol, py.test.collect.Module) + assert hasattr(modcol.obj, 'test_func') + + def test_function_equality(self, testdir, tmpdir): + config = testdir.reparseconfig() + f1 = py.test.collect.Function(name="name", config=config, + args=(1,), callobj=isinstance) + f2 = py.test.collect.Function(name="name",config=config, + args=(1,), callobj=py.builtin.callable) + assert not f1 == f2 + assert f1 != f2 + f3 = py.test.collect.Function(name="name", config=config, + args=(1,2), callobj=py.builtin.callable) + assert not f3 == f2 + assert f3 != f2 + + assert not f3 == f1 + assert f3 != f1 + + f1_b = py.test.collect.Function(name="name", config=config, + args=(1,), callobj=isinstance) + assert f1 == f1_b + assert not f1 != f1_b + + def test_function_equality_with_callspec(self, testdir, tmpdir): + config = testdir.reparseconfig() + class callspec1: + param = 1 + funcargs = {} + id = "hello" + class callspec2: + param = 1 + funcargs = {} + id = "world" + f5 = py.test.collect.Function(name="name", config=config, + callspec=callspec1, callobj=isinstance) + f5b = py.test.collect.Function(name="name", config=config, + callspec=callspec2, callobj=isinstance) + assert f5 != f5b + assert not (f5 == f5b) + + def test_pyfunc_call(self, testdir): + item = testdir.getitem("def test_func(): raise ValueError") + config = item.config + class MyPlugin1: + def pytest_pyfunc_call(self, pyfuncitem): + raise ValueError + class MyPlugin2: + def pytest_pyfunc_call(self, pyfuncitem): + return True + config.pluginmanager.register(MyPlugin1()) + config.pluginmanager.register(MyPlugin2()) + config.hook.pytest_pyfunc_call(pyfuncitem=item) + +class TestSorting: + def test_check_equality(self, testdir): + modcol = testdir.getmodulecol(""" + def test_pass(): pass + def test_fail(): assert 0 + """) + fn1 = modcol.collect_by_name("test_pass") + assert isinstance(fn1, py.test.collect.Function) + fn2 = modcol.collect_by_name("test_pass") + assert isinstance(fn2, py.test.collect.Function) + + assert fn1 == fn2 + assert fn1 != modcol + if py.std.sys.version_info < (3, 0): + assert cmp(fn1, fn2) == 0 + assert hash(fn1) == hash(fn2) + + fn3 = modcol.collect_by_name("test_fail") + assert isinstance(fn3, py.test.collect.Function) + assert not (fn1 == fn3) + assert fn1 != fn3 + + for fn in fn1,fn2,fn3: + assert fn != 3 + assert fn != modcol + assert fn != [1,2,3] + assert [1,2,3] != fn + assert modcol != fn + + def test_allow_sane_sorting_for_decorators(self, testdir): + modcol = testdir.getmodulecol(""" + def dec(f): + g = lambda: f(2) + g.place_as = f + return g + + + def test_b(y): + pass + test_b = dec(test_b) + + def test_a(y): + pass + test_a = dec(test_a) + """) + colitems = modcol.collect() + assert len(colitems) == 2 + assert [item.name for item in colitems] == ['test_b', 'test_a'] + + +class TestConftestCustomization: + def test_pytest_pycollect_makeitem(self, testdir): + testdir.makeconftest(""" + import py + class MyFunction(py.test.collect.Function): + pass + def pytest_pycollect_makeitem(collector, name, obj): + if name == "some": + return MyFunction(name, collector) + """) + testdir.makepyfile("def some(): pass") + result = testdir.runpytest("--collectonly") + result.stdout.fnmatch_lines([ + "*MyFunction*some*", + ]) + + def test_makeitem_non_underscore(self, testdir, monkeypatch): + modcol = testdir.getmodulecol("def _hello(): pass") + l = [] + 
monkeypatch.setattr(py.test.collect.Module, 'makeitem', + lambda self, name, obj: l.append(name)) + modcol._buildname2items() + assert '_hello' not in l + + +class TestReportinfo: + + def test_func_reportinfo(self, testdir): + item = testdir.getitem("def test_func(): pass") + fspath, lineno, modpath = item.reportinfo() + assert fspath == item.fspath + assert lineno == 0 + assert modpath == "test_func" + + def test_class_reportinfo(self, testdir): + modcol = testdir.getmodulecol(""" + # lineno 0 + class TestClass: + def test_hello(self): pass + """) + classcol = modcol.collect_by_name("TestClass") + fspath, lineno, msg = classcol.reportinfo() + assert fspath == modcol.fspath + assert lineno == 1 + assert msg == "TestClass" + + def test_generator_reportinfo(self, testdir): + modcol = testdir.getmodulecol(""" + # lineno 0 + def test_gen(): + def check(x): + assert x + yield check, 3 + """) + gencol = modcol.collect_by_name("test_gen") + fspath, lineno, modpath = gencol.reportinfo() + assert fspath == modcol.fspath + assert lineno == 1 + assert modpath == "test_gen" + + genitem = gencol.collect()[0] + fspath, lineno, modpath = genitem.reportinfo() + assert fspath == modcol.fspath + assert lineno == 2 + assert modpath == "test_gen[0]" + """ + def test_func(): + pass + def test_genfunc(): + def check(x): + pass + yield check, 3 + class TestClass: + def test_method(self): + pass + """ + +def test_setup_only_available_in_subdir(testdir): + sub1 = testdir.mkpydir("sub1") + sub2 = testdir.mkpydir("sub2") + sub1.join("conftest.py").write(py.code.Source(""" + import py + def pytest_runtest_setup(item): + assert item.fspath.purebasename == "test_in_sub1" + def pytest_runtest_call(item): + assert item.fspath.purebasename == "test_in_sub1" + def pytest_runtest_teardown(item): + assert item.fspath.purebasename == "test_in_sub1" + """)) + sub2.join("conftest.py").write(py.code.Source(""" + import py + def pytest_runtest_setup(item): + assert item.fspath.purebasename == "test_in_sub2" + def pytest_runtest_call(item): + assert item.fspath.purebasename == "test_in_sub2" + def pytest_runtest_teardown(item): + assert item.fspath.purebasename == "test_in_sub2" + """)) + sub1.join("test_in_sub1.py").write("def test_1(): pass") + sub2.join("test_in_sub2.py").write("def test_2(): pass") + result = testdir.runpytest("-v", "-s") + result.stdout.fnmatch_lines([ + "*2 passed*" + ]) + +def test_generate_tests_only_done_in_subdir(testdir): + sub1 = testdir.mkpydir("sub1") + sub2 = testdir.mkpydir("sub2") + sub1.join("conftest.py").write(py.code.Source(""" + def pytest_generate_tests(metafunc): + assert metafunc.function.__name__ == "test_1" + """)) + sub2.join("conftest.py").write(py.code.Source(""" + def pytest_generate_tests(metafunc): + assert metafunc.function.__name__ == "test_2" + """)) + sub1.join("test_in_sub1.py").write("def test_1(): pass") + sub2.join("test_in_sub2.py").write("def test_2(): pass") + result = testdir.runpytest("-v", "-s", sub1, sub2, sub1) + result.stdout.fnmatch_lines([ + "*3 passed*" + ]) + +def test_modulecol_roundtrip(testdir): + modcol = testdir.getmodulecol("pass", withinit=True) + trail = modcol.config._rootcol.totrail(modcol) + newcol = modcol.config._rootcol.fromtrail(trail) + assert modcol.name == newcol.name + + +class TestTracebackCutting: + def test_skip_simple(self): + from py.impl.test.outcome import Skipped + excinfo = py.test.raises(Skipped, 'py.test.skip("xxx")') + assert excinfo.traceback[-1].frame.code.name == "skip" + assert excinfo.traceback[-1].ishidden() + + def 
test_traceback_argsetup(self, testdir): + testdir.makeconftest(""" + def pytest_funcarg__hello(request): + raise ValueError("xyz") + """) + p = testdir.makepyfile("def test(hello): pass") + result = testdir.runpytest(p) + assert result.ret != 0 + out = result.stdout.str() + assert out.find("xyz") != -1 + assert out.find("conftest.py:2: ValueError") != -1 + numentries = out.count("_ _ _") # separator for traceback entries + assert numentries == 0 + + result = testdir.runpytest("--fulltrace", p) + out = result.stdout.str() + assert out.find("conftest.py:2: ValueError") != -1 + numentries = out.count("_ _ _ _") # separator for traceback entries + assert numentries >3 --- a/testing/pytest/test_pluginmanager.py +++ /dev/null @@ -1,438 +0,0 @@ -import py, os -from py.impl.test.pluginmanager import PluginManager, canonical_importname -from py.impl.test.pluginmanager import Registry, MultiCall, HookRelay, varnames - - -class TestBootstrapping: - def test_consider_env_fails_to_import(self, monkeypatch): - pluginmanager = PluginManager() - monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",") - py.test.raises(ImportError, "pluginmanager.consider_env()") - - def test_preparse_args(self): - pluginmanager = PluginManager() - py.test.raises(ImportError, """ - pluginmanager.consider_preparse(["xyz", "-p", "hello123"]) - """) - - def test_plugin_skip(self, testdir, monkeypatch): - p = testdir.makepyfile(pytest_skipping1=""" - import py - py.test.skip("hello") - """) - p.copy(p.dirpath("pytest_skipping2.py")) - monkeypatch.setenv("PYTEST_PLUGINS", "skipping2") - result = testdir.runpytest("-p", "skipping1", "--traceconfig") - assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*hint*skipping2*hello*", - "*hint*skipping1*hello*", - ]) - - def test_consider_env_plugin_instantiation(self, testdir, monkeypatch): - pluginmanager = PluginManager() - testdir.syspathinsert() - testdir.makepyfile(pytest_xy123="#") - monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123') - l1 = len(pluginmanager.getplugins()) - pluginmanager.consider_env() - l2 = len(pluginmanager.getplugins()) - assert l2 == l1 + 1 - assert pluginmanager.getplugin('pytest_xy123') - pluginmanager.consider_env() - l3 = len(pluginmanager.getplugins()) - assert l2 == l3 - - def test_consider_setuptools_instantiation(self, monkeypatch): - pkg_resources = py.test.importorskip("pkg_resources") - def my_iter(name): - assert name == "pytest11" - class EntryPoint: - name = "mytestplugin" - def load(self): - class PseudoPlugin: - x = 42 - return PseudoPlugin() - return iter([EntryPoint()]) - - monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) - pluginmanager = PluginManager() - pluginmanager.consider_setuptools_entrypoints() - plugin = pluginmanager.getplugin("mytestplugin") - assert plugin.x == 42 - plugin2 = pluginmanager.getplugin("pytest_mytestplugin") - assert plugin2 == plugin - - def test_consider_setuptools_not_installed(self, monkeypatch): - monkeypatch.setitem(py.std.sys.modules, 'pkg_resources', - py.std.types.ModuleType("pkg_resources")) - pluginmanager = PluginManager() - pluginmanager.consider_setuptools_entrypoints() - # ok, we did not explode - - def test_pluginmanager_ENV_startup(self, testdir, monkeypatch): - x500 = testdir.makepyfile(pytest_x500="#") - p = testdir.makepyfile(""" - import py - def test_hello(): - plugin = py.test.config.pluginmanager.getplugin('x500') - assert plugin is not None - """) - monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",") - result = testdir.runpytest(p) - 
assert result.ret == 0 - extra = result.stdout.fnmatch_lines(["*1 passed in*"]) - - def test_import_plugin_importname(self, testdir): - pluginmanager = PluginManager() - py.test.raises(ImportError, 'pluginmanager.import_plugin("x.y")') - py.test.raises(ImportError, 'pluginmanager.import_plugin("pytest_x.y")') - - reset = testdir.syspathinsert() - pluginname = "pytest_hello" - testdir.makepyfile(**{pluginname: ""}) - pluginmanager.import_plugin("hello") - len1 = len(pluginmanager.getplugins()) - pluginmanager.import_plugin("pytest_hello") - len2 = len(pluginmanager.getplugins()) - assert len1 == len2 - plugin1 = pluginmanager.getplugin("pytest_hello") - assert plugin1.__name__.endswith('pytest_hello') - plugin2 = pluginmanager.getplugin("hello") - assert plugin2 is plugin1 - - def test_consider_module(self, testdir): - pluginmanager = PluginManager() - testdir.syspathinsert() - testdir.makepyfile(pytest_plug1="#") - testdir.makepyfile(pytest_plug2="#") - mod = py.std.types.ModuleType("temp") - mod.pytest_plugins = ["pytest_plug1", "pytest_plug2"] - pluginmanager.consider_module(mod) - assert pluginmanager.getplugin("plug1").__name__ == "pytest_plug1" - assert pluginmanager.getplugin("plug2").__name__ == "pytest_plug2" - - def test_consider_module_import_module(self, testdir): - mod = py.std.types.ModuleType("x") - mod.pytest_plugins = "pytest_a" - aplugin = testdir.makepyfile(pytest_a="#") - pluginmanager = PluginManager() - reprec = testdir.getreportrecorder(pluginmanager) - #syspath.prepend(aplugin.dirpath()) - py.std.sys.path.insert(0, str(aplugin.dirpath())) - pluginmanager.consider_module(mod) - call = reprec.getcall(pluginmanager.hook.pytest_plugin_registered.name) - assert call.plugin.__name__ == "pytest_a" - - # check that it is not registered twice - pluginmanager.consider_module(mod) - l = reprec.getcalls("pytest_plugin_registered") - assert len(l) == 1 - - def test_consider_conftest_deprecated(self, testdir): - pp = PluginManager() - mod = testdir.makepyfile("class ConftestPlugin: pass").pyimport() - call = py.test.raises(ValueError, pp.consider_conftest, mod) - - def test_config_sets_conftesthandle_onimport(self, testdir): - config = testdir.parseconfig([]) - assert config._conftest._onimport == config._onimportconftest - - def test_consider_conftest_deps(self, testdir): - mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport() - pp = PluginManager() - py.test.raises(ImportError, "pp.consider_conftest(mod)") - - def test_registry(self): - pp = PluginManager() - class A: pass - a1, a2 = A(), A() - pp.register(a1) - assert pp.isregistered(a1) - pp.register(a2, "hello") - assert pp.isregistered(a2) - l = pp.getplugins() - assert a1 in l - assert a2 in l - assert pp.getplugin('hello') == a2 - pp.unregister(a1) - assert not pp.isregistered(a1) - pp.unregister(a2) - assert not pp.isregistered(a2) - - def test_register_imported_modules(self): - pp = PluginManager() - mod = py.std.types.ModuleType("x.y.pytest_hello") - pp.register(mod) - assert pp.isregistered(mod) - l = pp.getplugins() - assert mod in l - py.test.raises(AssertionError, "pp.register(mod)") - mod2 = py.std.types.ModuleType("pytest_hello") - #pp.register(mod2) # double registry - py.test.raises(AssertionError, "pp.register(mod)") - #assert not pp.isregistered(mod2) - assert pp.getplugins() == l - - def test_canonical_import(self, monkeypatch): - mod = py.std.types.ModuleType("pytest_xyz") - monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod) - pp = PluginManager() - pp.import_plugin('xyz') - assert 
pp.getplugin('xyz') == mod - assert pp.getplugin('pytest_xyz') == mod - assert pp.isregistered(mod) - - def test_register_mismatch_method(self): - pp = PluginManager() - class hello: - def pytest_gurgel(self): - pass - py.test.raises(Exception, "pp.register(hello())") - - def test_register_mismatch_arg(self): - pp = PluginManager() - class hello: - def pytest_configure(self, asd): - pass - excinfo = py.test.raises(Exception, "pp.register(hello())") - - def test_canonical_importname(self): - for name in 'xyz', 'pytest_xyz', 'pytest_Xyz', 'Xyz': - impname = canonical_importname(name) - -class TestPytestPluginInteractions: - def test_do_option_conftestplugin(self, testdir): - from py.impl.test.config import Config - p = testdir.makepyfile(""" - def pytest_addoption(parser): - parser.addoption('--test123', action="store_true") - """) - config = Config() - config._conftest.importconftest(p) - print(config.pluginmanager.getplugins()) - config.parse([]) - assert not config.option.test123 - - def test_do_ext_namespace(self, testdir): - testdir.makeconftest(""" - def pytest_namespace(): - return {'hello': 'world'} - """) - p = testdir.makepyfile(""" - from py.test import hello - import py - def test_hello(): - assert hello == "world" - assert 'hello' in py.test.__all__ - """) - result = testdir.runpytest(p) - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) - - def test_do_option_postinitialize(self, testdir): - from py.impl.test.config import Config - config = Config() - config.parse([]) - config.pluginmanager.do_configure(config=config) - assert not hasattr(config.option, 'test123') - p = testdir.makepyfile(""" - def pytest_addoption(parser): - parser.addoption('--test123', action="store_true", - default=True) - """) - config._conftest.importconftest(p) - assert config.option.test123 - - def test_configure(self, testdir): - config = testdir.parseconfig() - l = [] - class A: - def pytest_configure(self, config): - l.append(self) - - config.pluginmanager.register(A()) - assert len(l) == 0 - config.pluginmanager.do_configure(config=config) - assert len(l) == 1 - config.pluginmanager.register(A()) # this should lead to a configured() plugin - assert len(l) == 2 - assert l[0] != l[1] - - config.pluginmanager.do_unconfigure(config=config) - config.pluginmanager.register(A()) - assert len(l) == 2 - - # lower level API - - def test_listattr(self): - pluginmanager = PluginManager() - class My2: - x = 42 - pluginmanager.register(My2()) - assert not pluginmanager.listattr("hello") - assert pluginmanager.listattr("x") == [42] - -def test_namespace_has_default_and_env_plugins(testdir): - p = testdir.makepyfile(""" - import py - py.test.mark - """) - result = testdir.runpython(p) - assert result.ret == 0 - -def test_varnames(): - def f(x): - pass - class A: - def f(self, y): - pass - assert varnames(f) == ("x",) - assert varnames(A().f) == ('y',) - -class TestMultiCall: - def test_uses_copy_of_methods(self): - l = [lambda: 42] - mc = MultiCall(l, {}) - repr(mc) - l[:] = [] - res = mc.execute() - return res == 42 - - def test_call_passing(self): - class P1: - def m(self, __multicall__, x): - assert len(__multicall__.results) == 1 - assert not __multicall__.methods - return 17 - - class P2: - def m(self, __multicall__, x): - assert __multicall__.results == [] - assert __multicall__.methods - return 23 - - p1 = P1() - p2 = P2() - multicall = MultiCall([p1.m, p2.m], {'x': 23}) - assert "23" in repr(multicall) - reslist = multicall.execute() - assert len(reslist) == 2 - # ensure reversed order - assert reslist == 
[23, 17] - - def test_keyword_args(self): - def f(x): - return x + 1 - class A: - def f(self, x, y): - return x + y - multicall = MultiCall([f, A().f], dict(x=23, y=24)) - assert "'x': 23" in repr(multicall) - assert "'y': 24" in repr(multicall) - reslist = multicall.execute() - assert reslist == [24+23, 24] - assert "2 results" in repr(multicall) - - def test_keywords_call_error(self): - multicall = MultiCall([lambda x: x], {}) - py.test.raises(TypeError, "multicall.execute()") - - def test_call_subexecute(self): - def m(__multicall__): - subresult = __multicall__.execute() - return subresult + 1 - - def n(): - return 1 - - call = MultiCall([n, m], {}, firstresult=True) - res = call.execute() - assert res == 2 - - def test_call_none_is_no_result(self): - def m1(): - return 1 - def m2(): - return None - res = MultiCall([m1, m2], {}, firstresult=True).execute() - assert res == 1 - res = MultiCall([m1, m2], {}).execute() - assert res == [1] - -class TestRegistry: - - def test_register(self): - registry = Registry() - class MyPlugin: - pass - my = MyPlugin() - registry.register(my) - assert list(registry) == [my] - my2 = MyPlugin() - registry.register(my2) - assert list(registry) == [my, my2] - - assert registry.isregistered(my) - assert registry.isregistered(my2) - registry.unregister(my) - assert not registry.isregistered(my) - assert list(registry) == [my2] - - def test_listattr(self): - plugins = Registry() - class api1: - x = 41 - class api2: - x = 42 - class api3: - x = 43 - plugins.register(api1()) - plugins.register(api2()) - plugins.register(api3()) - l = list(plugins.listattr('x')) - assert l == [41, 42, 43] - l = list(plugins.listattr('x', reverse=True)) - assert l == [43, 42, 41] - -class TestHookRelay: - def test_happypath(self): - registry = Registry() - class Api: - def hello(self, arg): - pass - - mcm = HookRelay(hookspecs=Api, registry=registry) - assert hasattr(mcm, 'hello') - assert repr(mcm.hello).find("hello") != -1 - class Plugin: - def hello(self, arg): - return arg + 1 - registry.register(Plugin()) - l = mcm.hello(arg=3) - assert l == [4] - assert not hasattr(mcm, 'world') - - def test_only_kwargs(self): - registry = Registry() - class Api: - def hello(self, arg): - pass - mcm = HookRelay(hookspecs=Api, registry=registry) - py.test.raises(TypeError, "mcm.hello(3)") - - def test_firstresult_definition(self): - registry = Registry() - class Api: - def hello(self, arg): pass - hello.firstresult = True - - mcm = HookRelay(hookspecs=Api, registry=registry) - class Plugin: - def hello(self, arg): - return arg + 1 - registry.register(Plugin()) - res = mcm.hello(arg=3) - assert res == 4 - --- a/testing/pytest/acceptance_test.py +++ /dev/null @@ -1,91 +0,0 @@ -import sys, py - -class TestGeneralUsage: - def test_config_error(self, testdir): - testdir.makeconftest(""" - def pytest_configure(config): - raise config.Error("hello") - """) - result = testdir.runpytest(testdir.tmpdir) - assert result.ret != 0 - assert result.stderr.fnmatch_lines([ - '*ERROR: hello' - ]) - - def test_config_preparse_plugin_option(self, testdir): - testdir.makepyfile(pytest_xyz=""" - def pytest_addoption(parser): - parser.addoption("--xyz", dest="xyz", action="store") - """) - testdir.makepyfile(test_one=""" - import py - def test_option(): - assert py.test.config.option.xyz == "123" - """) - result = testdir.runpytest("-p", "xyz", "--xyz=123") - assert result.ret == 0 - assert result.stdout.fnmatch_lines([ - '*1 passed*', - ]) - - def test_basetemp(self, testdir): - mytemp = 
testdir.tmpdir.mkdir("mytemp") - p = testdir.makepyfile(""" - import py - def test_1(pytestconfig): - pytestconfig.getbasetemp().ensure("hello") - """) - result = testdir.runpytest(p, '--basetemp=%s' %mytemp) - assert result.ret == 0 - assert mytemp.join('hello').check() - - def test_assertion_magic(self, testdir): - p = testdir.makepyfile(""" - def test_this(): - x = 0 - assert x - """) - result = testdir.runpytest(p) - extra = result.stdout.fnmatch_lines([ - "> assert x", - "E assert 0", - ]) - assert result.ret == 1 - - def test_nested_import_error(self, testdir): - p = testdir.makepyfile(""" - import import_fails - def test_this(): - assert import_fails.a == 1 - """) - testdir.makepyfile(import_fails="import does_not_work") - result = testdir.runpytest(p) - extra = result.stdout.fnmatch_lines([ - #XXX on jython this fails: "> import import_fails", - "E ImportError: No module named does_not_work", - ]) - assert result.ret == 1 - - def test_not_collectable_arguments(self, testdir): - p1 = testdir.makepyfile("") - p2 = testdir.makefile(".pyc", "123") - result = testdir.runpytest(p1, p2) - assert result.ret != 0 - assert result.stderr.fnmatch_lines([ - "*ERROR: can't collect: %s" %(p2,) - ]) - - - def test_earlyinit(self, testdir): - p = testdir.makepyfile(""" - import py - assert hasattr(py.test, 'mark') - """) - result = testdir.runpython(p) - assert result.ret == 0 - - def test_pydoc(self, testdir): - result = testdir.runpython_c("import py ; help(py.test)") - assert result.ret == 0 - s = result.stdout.str() - assert 'MarkGenerator' in s --- /dev/null +++ b/testing/test_compat.py @@ -0,0 +1,53 @@ +from __future__ import generators +import py +from py.impl.test.compat import TestCase +from py.impl.test.outcome import Failed + +class TestCompatTestCaseSetupSemantics(TestCase): + globlist = [] + + def setUp(self): + self.__dict__.setdefault('l', []).append(42) + self.globlist.append(self) + + def tearDown(self): + self.l.pop() + + def test_issetup(self): + l = self.l + assert len(l) == 1 + assert l[-1] == 42 + #self.checkmultipleinstances() + + def test_issetup2(self): + l = self.l + assert len(l) == 1 + assert l[-1] == 42 + #self.checkmultipleinstances() + + #def checkmultipleinstances(self): + # for x,y in zip(self.globlist, self.globlist[1:]): + # assert x is not y + +class TestCompatAssertions(TestCase): + nameparamdef = { + 'failUnlessEqual,assertEqual,assertEquals': ('1, 1', '1, 0'), + 'assertNotEquals,failIfEqual': ('0, 1', '0,0'), + 'failUnless,assert_': ('1', 'None'), + 'failIf': ('0', '1'), + } + + sourcelist = [] + for names, (paramok, paramfail) in nameparamdef.items(): + for name in names.split(','): + source = """ + def test_%(name)s(self): + self.%(name)s(%(paramok)s) + #self.%(name)s(%(paramfail)s) + + def test_%(name)s_failing(self): + self.assertRaises(Failed, + self.%(name)s, %(paramfail)s) + """ % locals() + co = py.code.Source(source).compile() + exec(co) --- a/testing/pytest/test_recording.py +++ /dev/null @@ -1,42 +0,0 @@ -import py,sys - -class TestRecordingAccept: - def test_recording_and_back(self, testdir): - py.test.skip("implementation missing: recording") - p = testdir.makepyfile(""" - import py - def test_fail(): - assert x - def test_skip(): - py.test.skip("hello") - def test_pass(): - pass - """) - rdir = py.path.local("rdir") - result = self.runpytest(p, "--record=%s" %(rdir)) - record = py.test.RecordDir(result) - testrun = record.getlastrun() - assert testrun.sys.platform == sys.platform - assert testrun.sys.version_info == sys.version_info - assert 
testrun.sys.executable == sys.executable - - baseadress = ("test_one.py",) - failures = testrun.getfailures() - assert len(failures) == 1 - failure = failures[0] - assert failure.testaddress == baseadress + ("test_fail",) - assert failure.location.find("test_one.py:3") != -1 - assert failure.errmessage - assert failure.reprfailure # probably just a string for now - - skipped = testrun.getskipped() - assert len(skipped) == 1 - skip = skipped[0] - assert skip.testaddress == baseaddress + ("test_skip",) - assert skip.location == "test_one.py:7" - - passed = testrun.getpassed() - assert len(passed) == 1 - p = passed[0] - assert p.testaddress == baseaddress + ("test_skip",) - --- a/testing/pytest/test_funcargs.py +++ /dev/null @@ -1,544 +0,0 @@ -import py, sys -from py.impl.test import funcargs - -def test_getfuncargnames(): - def f(): pass - assert not funcargs.getfuncargnames(f) - def g(arg): pass - assert funcargs.getfuncargnames(g) == ['arg'] - def h(arg1, arg2="hello"): pass - assert funcargs.getfuncargnames(h) == ['arg1'] - def h(arg1, arg2, arg3="hello"): pass - assert funcargs.getfuncargnames(h) == ['arg1', 'arg2'] - class A: - def f(self, arg1, arg2="hello"): - pass - assert funcargs.getfuncargnames(A().f) == ['arg1'] - if sys.version_info < (3,0): - assert funcargs.getfuncargnames(A.f) == ['arg1'] - -def test_callspec_repr(): - cs = funcargs.CallSpec({}, 'hello', 1) - repr(cs) - cs = funcargs.CallSpec({}, 'hello', funcargs._notexists) - repr(cs) - -class TestFillFuncArgs: - def test_funcarg_lookupfails(self, testdir): - testdir.makeconftest(""" - def pytest_funcarg__xyzsomething(request): - return 42 - """) - item = testdir.getitem("def test_func(some): pass") - exc = py.test.raises(LookupError, "funcargs.fillfuncargs(item)") - s = str(exc.value) - assert s.find("xyzsomething") != -1 - - def test_funcarg_lookup_default(self, testdir): - item = testdir.getitem("def test_func(some, other=42): pass") - class Provider: - def pytest_funcarg__some(self, request): - return request.function.__name__ - item.config.pluginmanager.register(Provider()) - funcargs.fillfuncargs(item) - assert len(item.funcargs) == 1 - - def test_funcarg_basic(self, testdir): - item = testdir.getitem("def test_func(some, other): pass") - class Provider: - def pytest_funcarg__some(self, request): - return request.function.__name__ - def pytest_funcarg__other(self, request): - return 42 - item.config.pluginmanager.register(Provider()) - funcargs.fillfuncargs(item) - assert len(item.funcargs) == 2 - assert item.funcargs['some'] == "test_func" - assert item.funcargs['other'] == 42 - - def test_funcarg_lookup_modulelevel(self, testdir): - modcol = testdir.getmodulecol(""" - def pytest_funcarg__something(request): - return request.function.__name__ - - class TestClass: - def test_method(self, something): - pass - def test_func(something): - pass - """) - item1, item2 = testdir.genitems([modcol]) - funcargs.fillfuncargs(item1) - assert item1.funcargs['something'] == "test_method" - funcargs.fillfuncargs(item2) - assert item2.funcargs['something'] == "test_func" - - def test_funcarg_lookup_classlevel(self, testdir): - p = testdir.makepyfile(""" - class TestClass: - def pytest_funcarg__something(self, request): - return request.instance - def test_method(self, something): - assert something is self - """) - result = testdir.runpytest(p) - assert result.stdout.fnmatch_lines([ - "*1 passed*" - ]) - - def test_fillfuncargs_exposed(self, testdir): - item = testdir.getitem("def test_func(some, other=42): pass") - class Provider: - 
def pytest_funcarg__some(self, request): - return request.function.__name__ - item.config.pluginmanager.register(Provider()) - if hasattr(item, '_args'): - del item._args - py.test.collect._fillfuncargs(item) - assert len(item.funcargs) == 1 - -class TestRequest: - def test_request_attributes(self, testdir): - item = testdir.getitem(""" - def pytest_funcarg__something(request): pass - def test_func(something): pass - """) - req = funcargs.FuncargRequest(item) - assert req.function == item.obj - assert hasattr(req.module, 'test_func') - assert req.cls is None - assert req.function.__name__ == "test_func" - assert req.config == item.config - assert repr(req).find(req.function.__name__) != -1 - - def test_request_attributes_method(self, testdir): - item, = testdir.getitems(""" - class TestB: - def test_func(self, something): - pass - """) - req = funcargs.FuncargRequest(item) - assert req.cls.__name__ == "TestB" - assert req.instance.__class__ == req.cls - - def XXXtest_request_contains_funcarg_name2factory(self, testdir): - modcol = testdir.getmodulecol(""" - def pytest_funcarg__something(request): - pass - class TestClass: - def test_method(self, something): - pass - """) - item1, = testdir.genitems([modcol]) - assert item1.name == "test_method" - name2factory = funcargs.FuncargRequest(item1)._name2factory - assert len(name2factory) == 1 - assert name2factory[0].__name__ == "pytest_funcarg__something" - - def test_getfuncargvalue_recursive(self, testdir): - testdir.makeconftest(""" - def pytest_funcarg__something(request): - return 1 - """) - item = testdir.getitem(""" - def pytest_funcarg__something(request): - return request.getfuncargvalue("something") + 1 - def test_func(something): - assert something == 2 - """) - req = funcargs.FuncargRequest(item) - val = req.getfuncargvalue("something") - assert val == 2 - - def test_getfuncargvalue(self, testdir): - item = testdir.getitem(""" - l = [2] - def pytest_funcarg__something(request): return 1 - def pytest_funcarg__other(request): - return l.pop() - def test_func(something): pass - """) - req = funcargs.FuncargRequest(item) - py.test.raises(req.Error, req.getfuncargvalue, "notexists") - val = req.getfuncargvalue("something") - assert val == 1 - val = req.getfuncargvalue("something") - assert val == 1 - val2 = req.getfuncargvalue("other") - assert val2 == 2 - val2 = req.getfuncargvalue("other") # see about caching - assert val2 == 2 - req._fillfuncargs() - assert item.funcargs == {'something': 1} - - def test_request_addfinalizer(self, testdir): - item = testdir.getitem(""" - teardownlist = [] - def pytest_funcarg__something(request): - request.addfinalizer(lambda: teardownlist.append(1)) - def test_func(something): pass - """) - req = funcargs.FuncargRequest(item) - req.config._setupstate.prepare(item) # XXX - req._fillfuncargs() - # successively check finalization calls - teardownlist = item.getparent(py.test.collect.Module).obj.teardownlist - ss = item.config._setupstate - assert not teardownlist - ss.teardown_exact(item) - print(ss.stack) - assert teardownlist == [1] - - def test_request_addfinalizer_partial_setup_failure(self, testdir): - p = testdir.makepyfile(""" - l = [] - def pytest_funcarg__something(request): - request.addfinalizer(lambda: l.append(None)) - def test_func(something, missingarg): - pass - def test_second(): - assert len(l) == 1 - """) - result = testdir.runpytest(p) - assert result.stdout.fnmatch_lines([ - "*1 passed*1 error*" - ]) - - def test_request_getmodulepath(self, testdir): - modcol = 
testdir.getmodulecol("def test_somefunc(): pass") - item, = testdir.genitems([modcol]) - req = funcargs.FuncargRequest(item) - assert req.fspath == modcol.fspath - -class TestRequestCachedSetup: - def test_request_cachedsetup(self, testdir): - item1,item2 = testdir.getitems(""" - class TestClass: - def test_func1(self, something): - pass - def test_func2(self, something): - pass - """) - req1 = funcargs.FuncargRequest(item1) - l = ["hello"] - def setup(): - return l.pop() - ret1 = req1.cached_setup(setup) - assert ret1 == "hello" - ret1b = req1.cached_setup(setup) - assert ret1 == ret1b - req2 = funcargs.FuncargRequest(item2) - ret2 = req2.cached_setup(setup) - assert ret2 == ret1 - - def test_request_cachedsetup_extrakey(self, testdir): - item1 = testdir.getitem("def test_func(): pass") - req1 = funcargs.FuncargRequest(item1) - l = ["hello", "world"] - def setup(): - return l.pop() - ret1 = req1.cached_setup(setup, extrakey=1) - ret2 = req1.cached_setup(setup, extrakey=2) - assert ret2 == "hello" - assert ret1 == "world" - ret1b = req1.cached_setup(setup, extrakey=1) - ret2b = req1.cached_setup(setup, extrakey=2) - assert ret1 == ret1b - assert ret2 == ret2b - - def test_request_cachedsetup_cache_deletion(self, testdir): - item1 = testdir.getitem("def test_func(): pass") - req1 = funcargs.FuncargRequest(item1) - l = [] - def setup(): - l.append("setup") - def teardown(val): - l.append("teardown") - ret1 = req1.cached_setup(setup, teardown, scope="function") - assert l == ['setup'] - # artificial call of finalizer - req1.config._setupstate._callfinalizers(item1) - assert l == ["setup", "teardown"] - ret2 = req1.cached_setup(setup, teardown, scope="function") - assert l == ["setup", "teardown", "setup"] - req1.config._setupstate._callfinalizers(item1) - assert l == ["setup", "teardown", "setup", "teardown"] - - def test_request_cached_setup_two_args(self, testdir): - testdir.makepyfile(""" - def pytest_funcarg__arg1(request): - return request.cached_setup(lambda: 42) - def pytest_funcarg__arg2(request): - return request.cached_setup(lambda: 17) - def test_two_different_setups(arg1, arg2): - assert arg1 != arg2 - """) - result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) - - def test_request_cached_setup_getfuncargvalue(self, testdir): - testdir.makepyfile(""" - def pytest_funcarg__arg1(request): - arg1 = request.getfuncargvalue("arg2") - return request.cached_setup(lambda: arg1 + 1) - def pytest_funcarg__arg2(request): - return request.cached_setup(lambda: 10) - def test_two_funcarg(arg1): - assert arg1 == 11 - """) - result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) - - def test_request_cached_setup_functional(self, testdir): - testdir.makepyfile(test_0=""" - l = [] - def pytest_funcarg__something(request): - val = request.cached_setup(setup, teardown) - return val - def setup(mycache=[1]): - l.append(mycache.pop()) - return l - def teardown(something): - l.remove(something[0]) - l.append(2) - def test_list_once(something): - assert something == [1] - def test_list_twice(something): - assert something == [1] - """) - testdir.makepyfile(test_1=""" - import test_0 # should have run already - def test_check_test0_has_teardown_correct(): - assert test_0.l == [2] - """) - result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*3 passed*" - ]) - -class TestMetafunc: - def test_no_funcargs(self, testdir): - def function(): pass - metafunc = funcargs.Metafunc(function) - assert not metafunc.funcargnames - - def 
test_function_basic(self): - def func(arg1, arg2="qwe"): pass - metafunc = funcargs.Metafunc(func) - assert len(metafunc.funcargnames) == 1 - assert 'arg1' in metafunc.funcargnames - assert metafunc.function is func - assert metafunc.cls is None - - def test_addcall_no_args(self): - def func(arg1): pass - metafunc = funcargs.Metafunc(func) - metafunc.addcall() - assert len(metafunc._calls) == 1 - call = metafunc._calls[0] - assert call.id == "0" - assert not hasattr(call, 'param') - - def test_addcall_id(self): - def func(arg1): pass - metafunc = funcargs.Metafunc(func) - py.test.raises(ValueError, "metafunc.addcall(id=None)") - - metafunc.addcall(id=1) - py.test.raises(ValueError, "metafunc.addcall(id=1)") - py.test.raises(ValueError, "metafunc.addcall(id='1')") - metafunc.addcall(id=2) - assert len(metafunc._calls) == 2 - assert metafunc._calls[0].id == "1" - assert metafunc._calls[1].id == "2" - - def test_addcall_param(self): - def func(arg1): pass - metafunc = funcargs.Metafunc(func) - class obj: pass - metafunc.addcall(param=obj) - metafunc.addcall(param=obj) - metafunc.addcall(param=1) - assert len(metafunc._calls) == 3 - assert metafunc._calls[0].param == obj - assert metafunc._calls[1].param == obj - assert metafunc._calls[2].param == 1 - - def test_addcall_funcargs(self): - def func(arg1): pass - metafunc = funcargs.Metafunc(func) - class obj: pass - metafunc.addcall(funcargs={"x": 2}) - metafunc.addcall(funcargs={"x": 3}) - assert len(metafunc._calls) == 2 - assert metafunc._calls[0].funcargs == {'x': 2} - assert metafunc._calls[1].funcargs == {'x': 3} - assert not hasattr(metafunc._calls[1], 'param') - -class TestGenfuncFunctional: - def test_attributes(self, testdir): - p = testdir.makepyfile(""" - # assumes that generate/provide runs in the same process - import py - def pytest_generate_tests(metafunc): - metafunc.addcall(param=metafunc) - - def pytest_funcarg__metafunc(request): - assert request._pyfuncitem._genid == "0" - return request.param - - def test_function(metafunc): - assert metafunc.config == py.test.config - assert metafunc.module.__name__ == __name__ - assert metafunc.function == test_function - assert metafunc.cls is None - - class TestClass: - def test_method(self, metafunc): - assert metafunc.config == py.test.config - assert metafunc.module.__name__ == __name__ - if py.std.sys.version_info > (3, 0): - unbound = TestClass.test_method - else: - unbound = TestClass.test_method.im_func - # XXX actually have an unbound test function here? 
- assert metafunc.function == unbound - assert metafunc.cls == TestClass - """) - result = testdir.runpytest(p, "-v") - result.stdout.fnmatch_lines([ - "*2 passed in*", - ]) - - def test_addcall_with_two_funcargs_generators(self, testdir): - testdir.makeconftest(""" - def pytest_generate_tests(metafunc): - assert "arg1" in metafunc.funcargnames - metafunc.addcall(funcargs=dict(arg1=1, arg2=2)) - """) - p = testdir.makepyfile(""" - def pytest_generate_tests(metafunc): - metafunc.addcall(funcargs=dict(arg1=1, arg2=1)) - - class TestClass: - def test_myfunc(self, arg1, arg2): - assert arg1 == arg2 - """) - result = testdir.runpytest("-v", p) - assert result.stdout.fnmatch_lines([ - "*test_myfunc*0*PASS*", - "*test_myfunc*1*FAIL*", - "*1 failed, 1 passed*" - ]) - - def test_two_functions(self, testdir): - p = testdir.makepyfile(""" - def pytest_generate_tests(metafunc): - metafunc.addcall(param=10) - metafunc.addcall(param=20) - - def pytest_funcarg__arg1(request): - return request.param - - def test_func1(arg1): - assert arg1 == 10 - def test_func2(arg1): - assert arg1 in (10, 20) - """) - result = testdir.runpytest("-v", p) - assert result.stdout.fnmatch_lines([ - "*test_func1*0*PASS*", - "*test_func1*1*FAIL*", - "*test_func2*PASS*", - "*1 failed, 3 passed*" - ]) - - def test_generate_plugin_and_module(self, testdir): - testdir.makeconftest(""" - def pytest_generate_tests(metafunc): - assert "arg1" in metafunc.funcargnames - metafunc.addcall(id="world", param=(2,100)) - """) - p = testdir.makepyfile(""" - def pytest_generate_tests(metafunc): - metafunc.addcall(param=(1,1), id="hello") - - def pytest_funcarg__arg1(request): - return request.param[0] - def pytest_funcarg__arg2(request): - return request.param[1] - - class TestClass: - def test_myfunc(self, arg1, arg2): - assert arg1 == arg2 - """) - result = testdir.runpytest("-v", p) - assert result.stdout.fnmatch_lines([ - "*test_myfunc*hello*PASS*", - "*test_myfunc*world*FAIL*", - "*1 failed, 1 passed*" - ]) - - def test_generate_tests_in_class(self, testdir): - p = testdir.makepyfile(""" - class TestClass: - def pytest_generate_tests(self, metafunc): - metafunc.addcall(funcargs={'hello': 'world'}, id="hello") - - def test_myfunc(self, hello): - assert hello == "world" - """) - result = testdir.runpytest("-v", p) - assert result.stdout.fnmatch_lines([ - "*test_myfunc*hello*PASS*", - "*1 passed*" - ]) - - -def test_conftest_funcargs_only_available_in_subdir(testdir): - sub1 = testdir.mkpydir("sub1") - sub2 = testdir.mkpydir("sub2") - sub1.join("conftest.py").write(py.code.Source(""" - import py - def pytest_funcarg__arg1(request): - py.test.raises(Exception, "request.getfuncargvalue('arg2')") - """)) - sub2.join("conftest.py").write(py.code.Source(""" - import py - def pytest_funcarg__arg2(request): - py.test.raises(Exception, "request.getfuncargvalue('arg1')") - """)) - - sub1.join("test_in_sub1.py").write("def test_1(arg1): pass") - sub2.join("test_in_sub2.py").write("def test_2(arg2): pass") - result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "*2 passed*" - ]) - -def test_funcarg_non_pycollectobj(testdir): # rough jstests usage - testdir.makeconftest(""" - import py - def pytest_pycollect_makeitem(collector, name, obj): - if name == "MyClass": - return MyCollector(name, parent=collector) - class MyCollector(py.test.collect.Collector): - def reportinfo(self): - return self.fspath, 3, "xyz" - """) - modcol = testdir.getmodulecol(""" - def pytest_funcarg__arg1(request): - return 42 - class MyClass: - pass - """) - clscol = 
modcol.collect()[0] - clscol.obj = lambda arg1: None - clscol.funcargs = {} - funcargs.fillfuncargs(clscol) - assert clscol.funcargs['arg1'] == 42 - From commits-noreply at bitbucket.org Wed Jan 13 17:24:57 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 13 Jan 2010 16:24:57 +0000 (UTC) Subject: [py-svn] py-trunk commit 92022b94ebd9: move down py/impl/XYZ to py/_XYZ Message-ID: <20100113162457.DC1287EE7A@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263399354 -3600 # Node ID 92022b94ebd9777ecbf318bc77a1432e91f62ea0 # Parent 626e56a0236940356418324bf7d10e5e4ea63631 move down py/impl/XYZ to py/_XYZ --- /dev/null +++ b/py/_code/code.py @@ -0,0 +1,768 @@ +import py +import sys + +builtin_repr = repr + +repr = py.builtin._tryimport('repr', 'reprlib') + +class Code(object): + """ wrapper around Python code objects """ + def __init__(self, rawcode): + rawcode = py.code.getrawcode(rawcode) + self.raw = rawcode + try: + self.filename = rawcode.co_filename + self.firstlineno = rawcode.co_firstlineno - 1 + self.name = rawcode.co_name + except AttributeError: + raise TypeError("not a code object: %r" %(rawcode,)) + + def __eq__(self, other): + return self.raw == other.raw + + def __ne__(self, other): + return not self == other + + def new(self, rec=False, **kwargs): + """ return new code object with modified attributes. + if rec-cursive is true then dive into code + objects contained in co_consts. + """ + if sys.platform.startswith("java"): + # XXX jython does not support the below co_filename hack + return self.raw + names = [x for x in dir(self.raw) if x[:3] == 'co_'] + for name in kwargs: + if name not in names: + raise TypeError("unknown code attribute: %r" %(name, )) + if rec and hasattr(self.raw, 'co_consts'): # jython + newconstlist = [] + co = self.raw + cotype = type(co) + for c in co.co_consts: + if isinstance(c, cotype): + c = self.__class__(c).new(rec=True, **kwargs) + newconstlist.append(c) + return self.new(rec=False, co_consts=tuple(newconstlist), **kwargs) + for name in names: + if name not in kwargs: + kwargs[name] = getattr(self.raw, name) + arglist = [ + kwargs['co_argcount'], + kwargs['co_nlocals'], + kwargs.get('co_stacksize', 0), # jython + kwargs.get('co_flags', 0), # jython + kwargs.get('co_code', ''), # jython + kwargs.get('co_consts', ()), # jython + kwargs.get('co_names', []), # + kwargs['co_varnames'], + kwargs['co_filename'], + kwargs['co_name'], + kwargs['co_firstlineno'], + kwargs.get('co_lnotab', ''), #jython + kwargs.get('co_freevars', None), #jython + kwargs.get('co_cellvars', None), # jython + ] + if sys.version_info >= (3,0): + arglist.insert(1, kwargs['co_kwonlyargcount']) + return self.raw.__class__(*arglist) + else: + return py.std.new.code(*arglist) + + def path(self): + """ return a py.path.local object pointing to the source code """ + fn = self.raw.co_filename + try: + return fn.__path__ + except AttributeError: + p = py.path.local(self.raw.co_filename) + if not p.check(file=1): + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? 
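# A minimal usage sketch for the Code wrapper above (illustrative only; it
# relies solely on attributes defined in this class):
#
#     def f(a, b=2):
#         pass
#     code = py.code.Code(f)                  # also accepts frames or raw code objects
#     assert code.name == "f"
#     assert code.getargs() == ("a", "b")
#     print(code.path, code.firstlineno + 1)  # filename and 1-based line of "def f"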
+ p = self.raw.co_filename + return p + + path = property(path, None, None, "path of this code object") + + def fullsource(self): + """ return a py.code.Source object for the full source file of the code + """ + from py._code import source + full, _ = source.findsource(self.raw) + return full + fullsource = property(fullsource, None, None, + "full source containing this code object") + + def source(self): + """ return a py.code.Source object for the code object's source only + """ + # return source only for that part of code + return py.code.Source(self.raw) + + def getargs(self): + """ return a tuple with the argument names for the code object + """ + # handfull shortcut for getting args + raw = self.raw + return raw.co_varnames[:raw.co_argcount] + +class Frame(object): + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + def __init__(self, frame): + self.code = py.code.Code(frame.f_code) + self.lineno = frame.f_lineno - 1 + self.f_globals = frame.f_globals + self.f_locals = frame.f_locals + self.raw = frame + + def statement(self): + if self.code.fullsource is None: + return py.code.Source("") + return self.code.fullsource.getstatement(self.lineno) + statement = property(statement, None, None, + "statement this frame is at") + + def eval(self, code, **vars): + """ evaluate 'code' in the frame + + 'vars' are optional additional local variables + + returns the result of the evaluation + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def exec_(self, code, **vars): + """ exec 'code' in the frame + + 'vars' are optiona; additional local variables + """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + py.builtin.exec_(code, self.f_globals, f_locals ) + + def repr(self, object): + """ return a 'safe' (non-recursive, one-line) string repr for 'object' + """ + return safe_repr(object) + + def is_true(self, object): + return object + + def getargs(self): + """ return a list of tuples (name, value) for all arguments + """ + retval = [] + for arg in self.code.getargs(): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + +class TracebackEntry(object): + """ a single entry in a traceback """ + + exprinfo = None + + def __init__(self, rawentry): + self._rawentry = rawentry + self.frame = py.code.Frame(rawentry.tb_frame) + # Ugh. 2.4 and 2.5 differs here when encountering + # multi-line statements. 
Not sure about the solution, but + # should be portable + self.lineno = rawentry.tb_lineno - 1 + self.relline = self.lineno - self.frame.code.firstlineno + + def __repr__(self): + return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1) + + def statement(self): + """ return a py.code.Source object for the current statement """ + source = self.frame.code.fullsource + return source.getstatement(self.lineno) + statement = property(statement, None, None, + "statement of this traceback entry.") + + def path(self): + return self.frame.code.path + path = property(path, None, None, "path to the full source code") + + def getlocals(self): + return self.frame.f_locals + locals = property(getlocals, None, None, "locals of underlying frame") + + def reinterpret(self): + """Reinterpret the failing statement and return detailed information + about what operations are performed.""" + if self.exprinfo is None: + from py._code import assertion + source = str(self.statement).strip() + x = assertion.interpret(source, self.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + self.exprinfo = x + return self.exprinfo + + def getfirstlinesource(self): + return self.frame.code.firstlineno + + def getsource(self): + """ return failing source code. """ + source = self.frame.code.fullsource + if source is None: + return None + start = self.getfirstlinesource() + end = self.lineno + try: + _, end = source.getstatementrange(end) + except IndexError: + end = self.lineno + 1 + # heuristic to stop displaying source on e.g. + # if something: # assume this causes a NameError + # # _this_ lines and the one + # below we don't want from entry.getsource() + for i in range(self.lineno, end): + if source[i].rstrip().endswith(':'): + end = i + 1 + break + return source[start:end] + source = property(getsource) + + def ishidden(self): + """ return True if the current frame has a var __tracebackhide__ + resolving to True + + mostly for internal use + """ + try: + return self.frame.eval("__tracebackhide__") + except (SystemExit, KeyboardInterrupt): + raise + except: + return False + + def __str__(self): + try: + fn = str(self.path) + except py.error.Error: + fn = '???' + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except: + line = "???" + return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) + + def name(self): + return self.frame.code.raw.co_name + name = property(name, None, None, "co_name of underlying code") + +class Traceback(list): + """ Traceback objects encapsulate and offer higher level + access to Traceback entries. + """ + Entry = TracebackEntry + def __init__(self, tb): + """ initialize from given python traceback object. """ + if hasattr(tb, 'tb_next'): + def f(cur): + while cur is not None: + yield self.Entry(cur) + cur = cur.tb_next + list.__init__(self, f(tb)) + else: + list.__init__(self, tb) + + def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): + """ return a Traceback instance wrapping part of this Traceback + + by providing any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined + + this allows cutting the first part of a Traceback instance e.g.
+ for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback) + """ + for x in self: + code = x.frame.code + codepath = code.path + if ((path is None or codepath == path) and + (excludepath is None or (hasattr(codepath, 'relto') and + not codepath.relto(excludepath))) and + (lineno is None or x.lineno == lineno) and + (firstlineno is None or x.frame.code.firstlineno == firstlineno)): + return Traceback(x._rawentry) + return self + + def __getitem__(self, key): + val = super(Traceback, self).__getitem__(key) + if isinstance(key, type(slice(0))): + val = self.__class__(val) + return val + + def filter(self, fn=lambda x: not x.ishidden()): + """ return a Traceback instance with certain items removed + + fn is a function that gets a single argument, a TracebackItem + instance, and should return True when the item should be added + to the Traceback, False when not + + by default this removes all the TracebackItems which are hidden + (see ishidden() above) + """ + return Traceback(filter(fn, self)) + + def getcrashentry(self): + """ return last non-hidden traceback entry that lead + to the exception of a traceback. + """ + tb = self.filter() + if not tb: + tb = self + return tb[-1] + + def recursionindex(self): + """ return the index of the frame/TracebackItem where recursion + originates if appropriate, None if no recursion occurred + """ + cache = {} + for i, entry in enumerate(self): + key = entry.frame.code.path, entry.lineno + #print "checking for recursion at", key + l = cache.setdefault(key, []) + if l: + f = entry.frame + loc = f.f_locals + for otherloc in l: + if f.is_true(f.eval(co_equal, + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc)): + return i + l.append(entry.frame.f_locals) + return None + +co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', + '?', 'eval') + +class ExceptionInfo(object): + """ wraps sys.exc_info() objects and offers + help for navigating the traceback. + """ + _striptext = '' + def __init__(self, tup=None, exprinfo=None): + # NB. all attributes are private! Subclasses or other + # ExceptionInfo-like classes may have different attributes. 
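# Usage sketch (illustrative): ExceptionInfo is normally instantiated inside
# an except block, while sys.exc_info() still describes the active exception:
#
#     try:
#         assert 0, "demo"
#     except AssertionError:
#         excinfo = py.code.ExceptionInfo()       # wraps sys.exc_info()
#         print(excinfo.exconly(tryshort=True))   # "AssertionError: demo"
#         print(excinfo.getrepr(style="short"))   # short-style traceback representation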
+ if tup is None: + tup = sys.exc_info() + if exprinfo is None and isinstance(tup[1], py.code._AssertionError): + exprinfo = getattr(tup[1], 'msg', None) + if exprinfo is None: + exprinfo = str(tup[1]) + if exprinfo and exprinfo.startswith('assert '): + self._striptext = 'AssertionError: ' + self._excinfo = tup + self.type, self.value, tb = self._excinfo + self.typename = self.type.__name__ + self.traceback = py.code.Traceback(tb) + + def __repr__(self): + return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback)) + + def exconly(self, tryshort=False): + """ return the exception as a string + + when 'tryshort' resolves to True, and the exception is a + py.code._AssertionError, only the actual exception part of + the exception representation is returned (so 'AssertionError: ' is + removed from the beginning) + """ + lines = py.std.traceback.format_exception_only(self.type, self.value) + text = ''.join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext):] + return text + + def errisinstance(self, exc): + """ return True if the exception is an instance of exc """ + return isinstance(self.value, exc) + + def _getreprcrash(self): + exconly = self.exconly(tryshort=True) + entry = self.traceback.getcrashentry() + path, lineno = entry.path, entry.lineno + reprcrash = ReprFileLocation(path, lineno+1, exconly) + return reprcrash + + def getrepr(self, showlocals=False, style="long", + abspath=False, tbfilter=True, funcargs=False): + """ return str()able representation of this exception info. + showlocals: show locals per traceback entry + style: long|short|no traceback style + tbfilter: hide entries (where __tracebackhide__ is true) + """ + fmt = FormattedExcinfo(showlocals=showlocals, style=style, + abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) + return fmt.repr_excinfo(self) + + def __str__(self): + entry = self.traceback[-1] + loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) + return str(loc) + +class FormattedExcinfo(object): + """ presenting information about failing Functions and Generators. """ + # for traceback entries + flow_marker = ">" + fail_marker = "E" + + def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False): + self.showlocals = showlocals + self.style = style + self.tbfilter = tbfilter + self.funcargs = funcargs + self.abspath = abspath + + def _getindent(self, source): + # figure out indent for given source + try: + s = str(source.getstatement(len(source)-1)) + except KeyboardInterrupt: + raise + except: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry): + source = entry.getsource() + if source is not None: + source = source.deindent() + return source + + def _saferepr(self, obj): + return safe_repr(obj) + + def repr_args(self, entry): + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(): + args.append((argname, self._saferepr(argvalue))) + return ReprFuncArgs(args) + + def get_source(self, source, line_index=-1, excinfo=None): + """ return formatted and marked up source lines.
""" + lines = [] + if source is None: + source = py.code.Source("???") + line_index = 0 + if line_index < 0: + line_index += len(source) + for i in range(len(source)): + if i == line_index: + prefix = self.flow_marker + " " + else: + prefix = " " + line = prefix + source[i] + lines.append(line) + if excinfo is not None: + indent = self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly(self, excinfo, indent=4, markall=False): + lines = [] + indent = " " * indent + # get the real exception information out + exlines = excinfo.exconly(tryshort=True).split('\n') + failindent = self.fail_marker + indent[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indent + return lines + + def repr_locals(self, locals): + if self.showlocals: + lines = [] + keys = list(locals) + keys.sort() + for name in keys: + value = locals[name] + if name == '__builtins__': + lines.append("__builtins__ = ") + else: + # This formatting could all be handled by the + # _repr() function, which is only repr.Repr in + # disguise, so is very configurable. + str_repr = self._saferepr(value) + #if len(str_repr) < 70 or not isinstance(value, + # (list, tuple, dict)): + lines.append("%-10s = %s" %(name, str_repr)) + #else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # py.std.pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + + def repr_traceback_entry(self, entry, excinfo=None): + # excinfo is not None if this is the last tb entry + source = self._getentrysource(entry) + if source is None: + source = py.code.Source("???") + line_index = 0 + else: + line_index = entry.lineno - entry.getfirstlinesource() + + lines = [] + if self.style == "long": + reprargs = self.repr_args(entry) + lines.extend(self.get_source(source, line_index, excinfo)) + message = excinfo and excinfo.typename or "" + path = self._makepath(entry.path) + filelocrepr = ReprFileLocation(path, entry.lineno+1, message) + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, filelocrepr) + else: + if self.style == "short": + line = source[line_index].lstrip() + trybasename = getattr(entry.path, 'basename', entry.path) + lines.append(' File "%s", line %d, in %s' % ( + trybasename, entry.lineno+1, entry.name)) + lines.append(" " + line) + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None) + + def _makepath(self, path): + if not self.abspath: + np = py.path.local().bestrelpath(path) + if len(np) < len(str(path)): + path = np + return path + + def repr_traceback(self, excinfo): + traceback = excinfo.traceback + if self.tbfilter: + traceback = traceback.filter() + recursionindex = None + if excinfo.errisinstance(RuntimeError): + recursionindex = traceback.recursionindex() + last = traceback[-1] + entries = [] + extraline = None + for index, entry in enumerate(traceback): + einfo = (last == entry) and excinfo or None + reprentry = self.repr_traceback_entry(entry, einfo) + entries.append(reprentry) + if index == recursionindex: + extraline = "!!! 
Recursion detected (same locals & position)" + break + return ReprTraceback(entries, extraline, style=self.style) + + def repr_excinfo(self, excinfo): + reprtraceback = self.repr_traceback(excinfo) + reprcrash = excinfo._getreprcrash() + return ReprExceptionInfo(reprtraceback, reprcrash) + +class TerminalRepr: + def __str__(self): + tw = py.io.TerminalWriter(stringio=True) + self.toterminal(tw) + return tw.stringio.getvalue().strip() + + def __repr__(self): + return "<%s instance at %0x>" %(self.__class__, id(self)) + +class ReprExceptionInfo(TerminalRepr): + def __init__(self, reprtraceback, reprcrash): + self.reprtraceback = reprtraceback + self.reprcrash = reprcrash + self.sections = [] + + def addsection(self, name, content, sep="-"): + self.sections.append((name, content, sep)) + + def toterminal(self, tw): + self.reprtraceback.toterminal(tw) + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + +class ReprTraceback(TerminalRepr): + entrysep = "_ " + + def __init__(self, reprentries, extraline, style): + self.reprentries = reprentries + self.extraline = extraline + self.style = style + + def toterminal(self, tw): + sepok = False + for entry in self.reprentries: + if self.style == "long": + if sepok: + tw.sep(self.entrysep) + tw.line("") + sepok = True + entry.toterminal(tw) + if self.extraline: + tw.line(self.extraline) + +class ReprEntry(TerminalRepr): + localssep = "_ " + + def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr): + self.lines = lines + self.reprfuncargs = reprfuncargs + self.reprlocals = reprlocals + self.reprfileloc = filelocrepr + + def toterminal(self, tw): + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + for line in self.lines: + red = line.startswith("E ") + tw.line(line, bold=True, red=red) + if self.reprlocals: + #tw.sep(self.localssep, "Locals") + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self): + return "%s\n%s\n%s" % ("\n".join(self.lines), + self.reprlocals, + self.reprfileloc) + +class ReprFileLocation(TerminalRepr): + def __init__(self, path, lineno, message): + self.path = str(path) + self.lineno = lineno + self.message = message + + def toterminal(self, tw): + # filename and lineno output for each entry, + # using an output format that most editors unterstand + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.line("%s:%s: %s" %(self.path, self.lineno, msg)) + +class ReprLocals(TerminalRepr): + def __init__(self, lines): + self.lines = lines + + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + +class ReprFuncArgs(TerminalRepr): + def __init__(self, args): + self.args = args + + def toterminal(self, tw): + if self.args: + linesofar = "" + for name, value in self.args: + ns = "%s = %s" %(name, value) + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + + +class SafeRepr(repr.Repr): + """ subclass of repr.Repr that limits the resulting size of repr() + and includes information on exceptions raised during the call. 
+ """ + def __init__(self, *args, **kwargs): + repr.Repr.__init__(self, *args, **kwargs) + self.maxstring = 240 # 3 * 80 chars + self.maxother = 160 # 2 * 80 chars + + def repr(self, x): + return self._callhelper(repr.Repr.repr, self, x) + + def repr_instance(self, x, level): + return self._callhelper(builtin_repr, x) + + def _callhelper(self, call, x, *args): + try: + # Try the vanilla repr and make sure that the result is a string + s = call(x, *args) + except (KeyboardInterrupt, MemoryError, SystemExit): + raise + except: + cls, e, tb = sys.exc_info() + try: + exc_name = cls.__name__ + except: + exc_name = 'unknown' + try: + exc_info = str(e) + except: + exc_info = 'unknown' + return '<[%s("%s") raised in repr()] %s object at 0x%x>' % ( + exc_name, exc_info, x.__class__.__name__, id(x)) + else: + if len(s) > self.maxstring: + i = max(0, (self.maxstring-3)//2) + j = max(0, self.maxstring-3-i) + s = s[:i] + '...' + s[len(s)-j:] + return s + +safe_repr = SafeRepr().repr + +oldbuiltins = {} + +def patch_builtins(assertion=True, compile=True): + """ put compile and AssertionError builtins to Python's builtins. """ + if assertion: + from py._code import assertion + l = oldbuiltins.setdefault('AssertionError', []) + l.append(py.builtin.builtins.AssertionError) + py.builtin.builtins.AssertionError = assertion.AssertionError + if compile: + l = oldbuiltins.setdefault('compile', []) + l.append(py.builtin.builtins.compile) + py.builtin.builtins.compile = py.code.compile + +def unpatch_builtins(assertion=True, compile=True): + """ remove compile and AssertionError builtins from Python builtins. """ + if assertion: + py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop() + if compile: + py.builtin.builtins.compile = oldbuiltins['compile'].pop() + +def getrawcode(obj): + """ return code object for given function. """ + obj = getattr(obj, 'im_func', obj) + obj = getattr(obj, 'func_code', obj) + obj = getattr(obj, 'f_code', obj) + obj = getattr(obj, '__code__', obj) + return obj + --- /dev/null +++ b/py/_path/cacheutil.py @@ -0,0 +1,114 @@ +""" +This module contains multithread-safe cache implementations. + +All Caches have + + getorbuild(key, builder) + delentry(key) + +methods and allow configuration when instantiating the cache class. +""" +from time import time as gettime + +class BasicCache(object): + def __init__(self, maxentries=128): + self.maxentries = maxentries + self.prunenum = int(maxentries - maxentries/8) + self._dict = {} + + def clear(self): + self._dict.clear() + + def _getentry(self, key): + return self._dict[key] + + def _putentry(self, key, entry): + self._prunelowestweight() + self._dict[key] = entry + + def delentry(self, key, raising=False): + try: + del self._dict[key] + except KeyError: + if raising: + raise + + def getorbuild(self, key, builder): + try: + entry = self._getentry(key) + except KeyError: + entry = self._build(key, builder) + self._putentry(key, entry) + return entry.value + + def _prunelowestweight(self): + """ prune out entries with lowest weight. """ + numentries = len(self._dict) + if numentries >= self.maxentries: + # evict according to entry's weight + items = [(entry.weight, key) + for key, entry in self._dict.items()] + items.sort() + index = numentries - self.prunenum + if index > 0: + for weight, key in items[:index]: + # in MT situations the element might be gone + self.delentry(key, raising=False) + +class BuildcostAccessCache(BasicCache): + """ A BuildTime/Access-counting cache implementation. 
+ the weight of a value is computed as the product of + + num-accesses-of-a-value * time-to-build-the-value + + The values with the least such weights are evicted + if the cache maxentries threshold is superceded. + For implementation flexibility more than one object + might be evicted at a time. + """ + # time function to use for measuring build-times + + def _build(self, key, builder): + start = gettime() + val = builder() + end = gettime() + return WeightedCountingEntry(val, end-start) + + +class WeightedCountingEntry(object): + def __init__(self, value, oneweight): + self._value = value + self.weight = self._oneweight = oneweight + + def value(self): + self.weight += self._oneweight + return self._value + value = property(value) + +class AgingCache(BasicCache): + """ This cache prunes out cache entries that are too old. + """ + def __init__(self, maxentries=128, maxseconds=10.0): + super(AgingCache, self).__init__(maxentries) + self.maxseconds = maxseconds + + def _getentry(self, key): + entry = self._dict[key] + if entry.isexpired(): + self.delentry(key) + raise KeyError(key) + return entry + + def _build(self, key, builder): + val = builder() + entry = AgingEntry(val, gettime() + self.maxseconds) + return entry + +class AgingEntry(object): + def __init__(self, value, expirationtime): + self.value = value + self.weight = expirationtime + + def isexpired(self): + t = gettime() + return t >= self.weight --- /dev/null +++ b/py/_log/warning.py @@ -0,0 +1,76 @@ +import py, sys + +class DeprecationWarning(DeprecationWarning): + def __init__(self, msg, path, lineno): + self.msg = msg + self.path = path + self.lineno = lineno + def __repr__(self): + return "%s:%d: %s" %(self.path, self.lineno+1, self.msg) + def __str__(self): + return self.msg + +def _apiwarn(startversion, msg, stacklevel=2, function=None): + # below is mostly COPIED from python2.4/warnings.py's def warn() + # Get context information + if isinstance(stacklevel, str): + frame = sys._getframe(1) + level = 1 + found = frame.f_code.co_filename.find(stacklevel) != -1 + while frame: + co = frame.f_code + if co.co_filename.find(stacklevel) == -1: + if found: + stacklevel = level + break + else: + found = True + level += 1 + frame = frame.f_back + else: + stacklevel = 1 + msg = "%s (since version %s)" %(msg, startversion) + warn(msg, stacklevel=stacklevel+1, function=function) + +def warn(msg, stacklevel=1, function=None): + if function is not None: + filename = py.std.inspect.getfile(function) + lineno = py.code.getrawcode(function).co_firstlineno + else: + try: + caller = sys._getframe(stacklevel) + except ValueError: + globals = sys.__dict__ + lineno = 1 + else: + globals = caller.f_globals + lineno = caller.f_lineno + if '__name__' in globals: + module = globals['__name__'] + else: + module = "" + filename = globals.get('__file__') + if filename: + fnl = filename.lower() + if fnl.endswith(".pyc") or fnl.endswith(".pyo"): + filename = filename[:-1] + elif fnl.endswith("$py.class"): + filename = filename.replace('$py.class', '.py') + else: + if module == "__main__": + try: + filename = sys.argv[0] + except AttributeError: + # embedded interpreters don't have sys.argv, see bug #839151 + filename = '__main__' + if not filename: + filename = module + path = py.path.local(filename) + warning = DeprecationWarning(msg, path, lineno) + py.std.warnings.warn_explicit(warning, category=Warning, + filename=str(warning.path), + lineno=warning.lineno, + registry=py.std.warnings.__dict__.setdefault( + "__warningsregistry__", {}) + ) + --- 
/dev/null +++ b/py/_cmdline/pycountloc.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python + +# hands on script to compute the non-empty Lines of Code +# for tests and non-test code + +"""\ +py.countloc [PATHS] + +Count (non-empty) lines of python code and number of python files recursively +starting from a list of paths given on the command line (starting from the +current working directory). Distinguish between test files and normal ones and +report them separately. +""" +import py + +def main(): + parser = py.std.optparse.OptionParser(usage=__doc__) + (options, args) = parser.parse_args() + countloc(args) + +def nodot(p): + return p.check(dotfile=0) + +class FileCounter(object): + def __init__(self): + self.file2numlines = {} + self.numlines = 0 + self.numfiles = 0 + + def addrecursive(self, directory, fil="*.py", rec=nodot): + for x in directory.visit(fil, rec): + self.addfile(x) + + def addfile(self, fn, emptylines=False): + if emptylines: + s = len(p.readlines()) + else: + s = 0 + for i in fn.readlines(): + if i.strip(): + s += 1 + self.file2numlines[fn] = s + self.numfiles += 1 + self.numlines += s + + def getnumlines(self, fil): + numlines = 0 + for path, value in self.file2numlines.items(): + if fil(path): + numlines += value + return numlines + + def getnumfiles(self, fil): + numfiles = 0 + for path in self.file2numlines: + if fil(path): + numfiles += 1 + return numfiles + +def get_loccount(locations=None): + if locations is None: + localtions = [py.path.local()] + counter = FileCounter() + for loc in locations: + counter.addrecursive(loc, '*.py', rec=nodot) + + def istestfile(p): + return p.check(fnmatch='test_*.py') + isnottestfile = lambda x: not istestfile(x) + + numfiles = counter.getnumfiles(isnottestfile) + numlines = counter.getnumlines(isnottestfile) + numtestfiles = counter.getnumfiles(istestfile) + numtestlines = counter.getnumlines(istestfile) + + return counter, numfiles, numlines, numtestfiles, numtestlines + +def countloc(paths=None): + if not paths: + paths = ['.'] + locations = [py.path.local(x) for x in paths] + (counter, numfiles, numlines, numtestfiles, + numtestlines) = get_loccount(locations) + + items = counter.file2numlines.items() + items.sort(lambda x,y: cmp(x[1], y[1])) + for x, y in items: + print("%3d %30s" % (y,x)) + + print("%30s %3d" %("number of testfiles", numtestfiles)) + print("%30s %3d" %("number of non-empty testlines", numtestlines)) + print("%30s %3d" %("number of files", numfiles)) + print("%30s %3d" %("number of non-empty lines", numlines)) + --- /dev/null +++ b/py/_cmdline/pyconvert_unittest.py @@ -0,0 +1,249 @@ +import re +import sys +import parser + +d={} +# d is the dictionary of unittest changes, keyed to the old name +# used by unittest. +# d[old][0] is the new replacement function. +# d[old][1] is the operator you will substitute, or '' if there is none. +# d[old][2] is the possible number of arguments to the unittest +# function. 
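# For example, the replacement table below drives rewrites roughly like the
# following (an illustrative sketch of the intended output, not an exhaustive
# specification of the converter's exact spacing):
#
#     self.assertEqual(x, y)         ->  assert x == y
#     self.failIf(x)                 ->  assert not x
#     self.assertRaises(Err, f, a)   ->  raises(Err, f, a)
#     self.assertAlmostEqual(a, b)   ->  assert round(a - b, 7) == 0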
+ +# Old Unittest Name new name operator # of args +d['assertRaises'] = ('raises', '', ['Any']) +d['fail'] = ('raise AssertionError', '', [0,1]) +d['assert_'] = ('assert', '', [1,2]) +d['failIf'] = ('assert not', '', [1,2]) +d['assertEqual'] = ('assert', ' ==', [2,3]) +d['failIfEqual'] = ('assert not', ' ==', [2,3]) +d['assertIn'] = ('assert', ' in', [2,3]) +d['assertNotIn'] = ('assert', ' not in', [2,3]) +d['assertNotEqual'] = ('assert', ' !=', [2,3]) +d['failUnlessEqual'] = ('assert', ' ==', [2,3]) +d['assertAlmostEqual'] = ('assert round', ' ==', [2,3,4]) +d['failIfAlmostEqual'] = ('assert not round', ' ==', [2,3,4]) +d['assertNotAlmostEqual'] = ('assert round', ' !=', [2,3,4]) +d['failUnlessAlmostEquals'] = ('assert round', ' ==', [2,3,4]) + +# the list of synonyms +d['failUnlessRaises'] = d['assertRaises'] +d['failUnless'] = d['assert_'] +d['assertEquals'] = d['assertEqual'] +d['assertNotEquals'] = d['assertNotEqual'] +d['assertAlmostEquals'] = d['assertAlmostEqual'] +d['assertNotAlmostEquals'] = d['assertNotAlmostEqual'] + +# set up the regular expressions we will need +leading_spaces = re.compile(r'^(\s*)') # this never fails + +pat = '' +for k in d.keys(): # this complicated pattern to match all unittests + pat += '|' + r'^(\s*)' + 'self.' + k + r'\(' # \tself.whatever( + +old_names = re.compile(pat[1:]) +linesep='\n' # nobody will really try to convert files not read + # in text mode, will they? + + +def blocksplitter(fp): + '''split a file into blocks that are headed by functions to rename''' + + blocklist = [] + blockstring = '' + + for line in fp: + interesting = old_names.match(line) + if interesting : + if blockstring: + blocklist.append(blockstring) + blockstring = line # reset the block + else: + blockstring += line + + blocklist.append(blockstring) + return blocklist + +def rewrite_utest(block): + '''rewrite every block to use the new utest functions''' + + '''returns the rewritten unittest, unless it ran into problems, + in which case it just returns the block unchanged. + ''' + utest = old_names.match(block) + + if not utest: + return block + + old = utest.group(0).lstrip()[5:-1] # the name we want to replace + new = d[old][0] # the name of the replacement function + op = d[old][1] # the operator you will use , or '' if there is none. + possible_args = d[old][2] # a list of the number of arguments the + # unittest function could possibly take. + + if possible_args == ['Any']: # just rename assertRaises & friends + return re.sub('self.'+old, new, block) + + message_pos = possible_args[-1] + # the remaining unittests can have an optional message to print + # when they fail. It is always the last argument to the function. + + try: + indent, argl, trailer = decompose_unittest(old, block) + + except SyntaxError: # but we couldn't parse it! 
+ return block + + argnum = len(argl) + if argnum not in possible_args: + # sanity check - this one isn't real either + return block + + elif argnum == message_pos: + message = argl[-1] + argl = argl[:-1] + else: + message = None + + if argnum is 0 or (argnum is 1 and argnum is message_pos): #unittest fail() + string = '' + if message: + message = ' ' + message + + elif message_pos is 4: # assertAlmostEqual & friends + try: + pos = argl[2].lstrip() + except IndexError: + pos = '7' # default if none is specified + string = '(%s -%s, %s)%s 0' % (argl[0], argl[1], pos, op ) + + else: # assert_, assertEquals and all the rest + string = ' ' + op.join(argl) + + if message: + string = string + ',' + message + + return indent + new + string + trailer + +def decompose_unittest(old, block): + '''decompose the block into its component parts''' + + ''' returns indent, arglist, trailer + indent -- the indentation + arglist -- the arguments to the unittest function + trailer -- any extra junk after the closing paren, such as #commment + ''' + + indent = re.match(r'(\s*)', block).group() + pat = re.search('self.' + old + r'\(', block) + + args, trailer = get_expr(block[pat.end():], ')') + arglist = break_args(args, []) + + if arglist == ['']: # there weren't any + return indent, [], trailer + + for i in range(len(arglist)): + try: + parser.expr(arglist[i].lstrip('\t ')) + except SyntaxError: + if i == 0: + arglist[i] = '(' + arglist[i] + ')' + else: + arglist[i] = ' (' + arglist[i] + ')' + + return indent, arglist, trailer + +def break_args(args, arglist): + '''recursively break a string into a list of arguments''' + try: + first, rest = get_expr(args, ',') + if not rest: + return arglist + [first] + else: + return [first] + break_args(rest, arglist) + except SyntaxError: + return arglist + [args] + +def get_expr(s, char): + '''split a string into an expression, and the rest of the string''' + + pos=[] + for i in range(len(s)): + if s[i] == char: + pos.append(i) + if pos == []: + raise SyntaxError # we didn't find the expected char. Ick. + + for p in pos: + # make the python parser do the hard work of deciding which comma + # splits the string into two expressions + try: + parser.expr('(' + s[:p] + ')') + return s[:p], s[p+1:] + except SyntaxError: # It's not an expression yet + pass + raise SyntaxError # We never found anything that worked. + + +def main(): + import sys + import py + + usage = "usage: %prog [-s [filename ...] | [-i | -c filename ...]]" + optparser = py.std.optparse.OptionParser(usage) + + def select_output (option, opt, value, optparser, **kw): + if hasattr(optparser, 'output'): + optparser.error( + 'Cannot combine -s -i and -c options. Use one only.') + else: + optparser.output = kw['output'] + + optparser.add_option("-s", "--stdout", action="callback", + callback=select_output, + callback_kwargs={'output':'stdout'}, + help="send your output to stdout") + + optparser.add_option("-i", "--inplace", action="callback", + callback=select_output, + callback_kwargs={'output':'inplace'}, + help="overwrite files in place") + + optparser.add_option("-c", "--copy", action="callback", + callback=select_output, + callback_kwargs={'output':'copy'}, + help="copy files ... 
fn.py --> fn_cp.py") + + options, args = optparser.parse_args() + + output = getattr(optparser, 'output', 'stdout') + + if output in ['inplace', 'copy'] and not args: + optparser.error( + '-i and -c option require at least one filename') + + if not args: + s = '' + for block in blocksplitter(sys.stdin): + s += rewrite_utest(block) + sys.stdout.write(s) + + else: + for infilename in args: # no error checking to see if we can open, etc. + infile = file(infilename) + s = '' + for block in blocksplitter(infile): + s += rewrite_utest(block) + if output == 'inplace': + outfile = file(infilename, 'w+') + elif output == 'copy': # yes, just go clobber any existing .cp + outfile = file (infilename[:-3]+ '_cp.py', 'w+') + else: + outfile = sys.stdout + + outfile.write(s) + + +if __name__ == '__main__': + main() --- /dev/null +++ b/py/_compat/dep_doctest.py @@ -0,0 +1,5 @@ +import py + +py.log._apiwarn("1.1", "py.compat.doctest deprecated, use standard library version.", +stacklevel="apipkg") +doctest = py.std.doctest --- /dev/null +++ b/py/_path/common.py @@ -0,0 +1,333 @@ +""" +""" +import os, sys +import py + +class Checkers: + _depend_on_existence = 'exists', 'link', 'dir', 'file' + + def __init__(self, path): + self.path = path + + def dir(self): + raise NotImplementedError + + def file(self): + raise NotImplementedError + + def dotfile(self): + return self.path.basename.startswith('.') + + def ext(self, arg): + if not arg.startswith('.'): + arg = '.' + arg + return self.path.ext == arg + + def exists(self): + raise NotImplementedError + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return FNMatcher(arg)(self.path) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == 'not': + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError( + "no %r checker available for %r" % (name, self.path)) + try: + if py.code.getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (py.error.ENOENT, py.error.ENOTDIR): + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = 'not' + name + if name in kw: + if not kw.get(name): + return False + return True + +class NeverRaised(Exception): + pass + +class PathBase(object): + """ shared implementation for filesystem path objects.""" + Checkers = Checkers + + def __div__(self, other): + return self.join(str(other)) + __truediv__ = __div__ # py3k + + def basename(self): + """ basename part of path. """ + return self._getbyspec('basename')[0] + basename = property(basename, None, None, basename.__doc__) + + def purebasename(self): + """ pure base name of the path.""" + return self._getbyspec('purebasename')[0] + purebasename = property(purebasename, None, None, purebasename.__doc__) + + def ext(self): + """ extension of the path (including the '.').""" + return self._getbyspec('ext')[0] + ext = property(ext, None, None, ext.__doc__) + + def dirpath(self, *args, **kwargs): + """ return the directory Path of the current Path joined + with any given path arguments. 
+ """ + return self.new(basename='').join(*args, **kwargs) + + def read(self, mode='r'): + """ read and return a bytestring from reading the path. """ + if sys.version_info < (2,3): + for x in 'u', 'U': + if x in mode: + mode = mode.replace(x, '') + f = self.open(mode) + try: + return f.read() + finally: + f.close() + + def readlines(self, cr=1): + """ read and return a list of lines from the path. if cr is False, the +newline will be removed from the end of each line. """ + if not cr: + content = self.read('rU') + return content.split('\n') + else: + f = self.open('rU') + try: + return f.readlines() + finally: + f.close() + + def load(self): + """ (deprecated) return object unpickled from self.read() """ + f = self.open('rb') + try: + return py.error.checked_call(py.std.pickle.load, f) + finally: + f.close() + + def move(self, target): + """ move this path to target. """ + if target.relto(self): + raise py.error.EINVAL(target, + "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except py.error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def __repr__(self): + """ return a string representation of this path. """ + return repr(str(self)) + + def check(self, **kw): + """ check a path for existence, or query its properties + + without arguments, this returns True if the path exists (on the + filesystem), False if not + + with (keyword only) arguments, the object compares the value + of the argument with the value of a property with the same name + (if it has one, else it raises a TypeError) + + when for example the keyword argument 'ext' is '.py', this will + return True if self.ext == '.py', False otherwise + """ + if not kw: + kw = {'exists' : 1} + return self.Checkers(self)._evaluate(kw) + + def relto(self, relpath): + """ return a string which is the relative part of the path + to the given 'relpath'. + """ + if not isinstance(relpath, (str, PathBase)): + raise TypeError("%r: not a string or path object" %(relpath,)) + strrelpath = str(relpath) + if strrelpath and strrelpath[-1] != self.sep: + strrelpath += self.sep + #assert strrelpath[-1] == self.sep + #assert strrelpath[-2] != self.sep + strself = str(self) + if sys.platform == "win32": + if os.path.normcase(strself).startswith( + os.path.normcase(strrelpath)): + return strself[len(strrelpath):] + elif strself.startswith(strrelpath): + return strself[len(strrelpath):] + return "" + + def bestrelpath(self, dest): + """ return a string which is a relative path from self + to dest such that self.join(bestrelpath) == dest and + if not such path can be determined return dest. + """ + try: + base = self.common(dest) + if not base: # can be the case on windows + return str(dest) + self2base = self.relto(base) + reldest = dest.relto(base) + if self2base: + n = self2base.count(self.sep) + 1 + else: + n = 0 + l = ['..'] * n + if reldest: + l.append(reldest) + target = dest.sep.join(l) + return target + except AttributeError: + return str(dest) + + + def parts(self, reverse=False): + """ return a root-first list of all ancestor directories + plus the path itself. + """ + current = self + l = [self] + while 1: + last = current + current = current.dirpath() + if last == current: + break + l.insert(0, current) + if reverse: + l.reverse() + return l + + def common(self, other): + """ return the common part shared with the other path + or None if there is no common part. 
+ """ + last = None + for x, y in zip(self.parts(), other.parts()): + if x != y: + return last + last = x + return last + + def __add__(self, other): + """ return new path object with 'other' added to the basename""" + return self.new(basename=self.basename+str(other)) + + def __cmp__(self, other): + """ return sort value (-1, 0, +1). """ + try: + return cmp(self.strpath, other.strpath) + except AttributeError: + return cmp(str(self), str(other)) # self.path, other.path) + + def __lt__(self, other): + try: + return self.strpath < other.strpath + except AttributeError: + return str(self) < str(other) + + def visit(self, fil=None, rec=None, ignore=NeverRaised): + """ yields all paths below the current one + + fil is a filter (glob pattern or callable), if not matching the + path will not be yielded, defaulting to None (everything is + returned) + + rec is a filter (glob pattern or callable) that controls whether + a node is descended, defaulting to None + + ignore is an Exception class that is ignoredwhen calling dirlist() + on any of the paths (by default, all exceptions are reported) + """ + if isinstance(fil, str): + fil = FNMatcher(fil) + if rec: + if isinstance(rec, str): + rec = fnmatch(fil) + elif not hasattr(rec, '__call__'): + rec = None + try: + entries = self.listdir() + except ignore: + return + dirs = [p for p in entries + if p.check(dir=1) and (rec is None or rec(p))] + for subdir in dirs: + for p in subdir.visit(fil=fil, rec=rec, ignore=ignore): + yield p + for p in entries: + if fil is None or fil(p): + yield p + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, '__call__'): + res.sort(sort) + else: + res.sort() + + def samefile(self, other): + """ return True if other refers to the same stat object as self. """ + return self.strpath == str(other) + +class FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + def __call__(self, path): + """return true if the basename/fullname matches the glob-'pattern'. + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + if the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. + """ + pattern = self.pattern + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? + pattern = '*' + path.sep + pattern + from fnmatch import fnmatch + return fnmatch(name, pattern) + --- /dev/null +++ b/py/_cmdline/__init__.py @@ -0,0 +1,1 @@ +# --- /dev/null +++ b/py/_io/terminalwriter.py @@ -0,0 +1,264 @@ +""" + +Helper functions for writing to terminals and files. + +""" + + +import sys, os +import py + +def _getdimensions(): + import termios,fcntl,struct + call = fcntl.ioctl(0,termios.TIOCGWINSZ,"\000"*8) + height,width = struct.unpack( "hhhh", call ) [:2] + return height, width + +if sys.platform == 'win32': + # ctypes access to the Windows console + + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + FOREGROUND_BLUE = 0x0001 # text color contains blue. + FOREGROUND_GREEN = 0x0002 # text color contains green. + FOREGROUND_RED = 0x0004 # text color contains red. + FOREGROUND_WHITE = 0x0007 + FOREGROUND_INTENSITY = 0x0008 # text color is intensified. + BACKGROUND_BLUE = 0x0010 # background color contains blue. + BACKGROUND_GREEN = 0x0020 # background color contains green. 
+ BACKGROUND_RED = 0x0040 # background color contains red. + BACKGROUND_WHITE = 0x0070 + BACKGROUND_INTENSITY = 0x0080 # background color is intensified. + + def GetStdHandle(kind): + import ctypes + return ctypes.windll.kernel32.GetStdHandle(kind) + + def SetConsoleTextAttribute(handle, attr): + import ctypes + ctypes.windll.kernel32.SetConsoleTextAttribute( + handle, attr) + + def _getdimensions(): + import ctypes + from ctypes import wintypes + + SHORT = ctypes.c_short + class COORD(ctypes.Structure): + _fields_ = [('X', SHORT), + ('Y', SHORT)] + class SMALL_RECT(ctypes.Structure): + _fields_ = [('Left', SHORT), + ('Top', SHORT), + ('Right', SHORT), + ('Bottom', SHORT)] + class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): + _fields_ = [('dwSize', COORD), + ('dwCursorPosition', COORD), + ('wAttributes', wintypes.WORD), + ('srWindow', SMALL_RECT), + ('dwMaximumWindowSize', COORD)] + STD_OUTPUT_HANDLE = -11 + handle = GetStdHandle(STD_OUTPUT_HANDLE) + info = CONSOLE_SCREEN_BUFFER_INFO() + ctypes.windll.kernel32.GetConsoleScreenBufferInfo( + handle, ctypes.byref(info)) + # Substract one from the width, otherwise the cursor wraps + # and the ending \n causes an empty line to display. + return info.dwSize.Y, info.dwSize.X - 1 + +def get_terminal_width(): + try: + height, width = _getdimensions() + except (SystemExit, KeyboardInterrupt): + raise + except: + # FALLBACK + width = int(os.environ.get('COLUMNS', 80))-1 + # XXX the windows getdimensions may be bogus, let's sanify a bit + width = max(width, 40) # we alaways need 40 chars + return width + +terminal_width = get_terminal_width() + +# XXX unify with _escaped func below +def ansi_print(text, esc, file=None, newline=True, flush=False): + if file is None: + file = sys.stderr + text = text.rstrip() + if esc and not isinstance(esc, tuple): + esc = (esc,) + if esc and sys.platform != "win32" and file.isatty(): + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text + + '\x1b[0m') # ANSI color code "reset" + if newline: + text += '\n' + + if esc and sys.platform == "win32" and file.isatty(): + if 1 in esc: + bold = True + esc = tuple([x for x in esc if x != 1]) + else: + bold = False + esctable = {() : FOREGROUND_WHITE, # normal + (31,): FOREGROUND_RED, # red + (32,): FOREGROUND_GREEN, # green + (33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow + (34,): FOREGROUND_BLUE, # blue + (35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple + (36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan + (37,): FOREGROUND_WHITE, # white + (39,): FOREGROUND_WHITE, # reset + } + attr = esctable.get(esc, FOREGROUND_WHITE) + if bold: + attr |= FOREGROUND_INTENSITY + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + if file is sys.stderr: + handle = GetStdHandle(STD_ERROR_HANDLE) + else: + handle = GetStdHandle(STD_OUTPUT_HANDLE) + SetConsoleTextAttribute(handle, attr) + file.write(text) + SetConsoleTextAttribute(handle, FOREGROUND_WHITE) + else: + file.write(text) + + if flush: + file.flush() + +def should_do_markup(file): + return hasattr(file, 'isatty') and file.isatty() \ + and os.environ.get('TERM') != 'dumb' + +class TerminalWriter(object): + _esctable = dict(black=30, red=31, green=32, yellow=33, + blue=34, purple=35, cyan=36, white=37, + Black=40, Red=41, Green=42, Yellow=43, + Blue=44, Purple=45, Cyan=46, White=47, + bold=1, light=2, blink=5, invert=7) + + # XXX deprecate stringio argument + def __init__(self, file=None, stringio=False, encoding=None): + self.encoding = encoding + + if file is None: + if stringio: + self.stringio = file = py.io.TextIO() + else: 
+ file = py.std.sys.stdout + elif hasattr(file, '__call__'): + file = WriteFile(file, encoding=encoding) + self._file = file + self.fullwidth = get_terminal_width() + self.hasmarkup = should_do_markup(file) + + def _escaped(self, text, esc): + if esc and self.hasmarkup: + text = (''.join(['\x1b[%sm' % cod for cod in esc]) + + text +'\x1b[0m') + return text + + def markup(self, text, **kw): + esc = [] + for name in kw: + if name not in self._esctable: + raise ValueError("unknown markup: %r" %(name,)) + if kw[name]: + esc.append(self._esctable[name]) + return self._escaped(text, tuple(esc)) + + def sep(self, sepchar, title=None, fullwidth=None, **kw): + if fullwidth is None: + fullwidth = self.fullwidth + # the goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = (fullwidth - len(title) - 2) // (2*len(sepchar)) + fill = sepchar * N + line = "%s %s %s" % (fill, title, fill) + else: + # we want len(sepchar)*N <= fullwidth + # i.e. N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # in some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line + if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **kw) + + def write(self, s, **kw): + if s: + s = self._getbytestring(s) + if self.hasmarkup and kw: + s = self.markup(s, **kw) + self._file.write(s) + self._file.flush() + + def _getbytestring(self, s): + # XXX review this and the whole logic + if self.encoding and sys.version_info < (3,0) and isinstance(s, unicode): + return s.encode(self.encoding) + elif not isinstance(s, str): + return str(s) + return s + + def line(self, s='', **kw): + self.write(s, **kw) + self.write('\n') + +class Win32ConsoleWriter(TerminalWriter): + def write(self, s, **kw): + if s: + s = self._getbytestring(s) + if self.hasmarkup: + handle = GetStdHandle(STD_OUTPUT_HANDLE) + + if self.hasmarkup and kw: + attr = 0 + if kw.pop('bold', False): + attr |= FOREGROUND_INTENSITY + + if kw.pop('red', False): + attr |= FOREGROUND_RED + elif kw.pop('blue', False): + attr |= FOREGROUND_BLUE + elif kw.pop('green', False): + attr |= FOREGROUND_GREEN + else: + attr |= FOREGROUND_WHITE + + SetConsoleTextAttribute(handle, attr) + self._file.write(s) + self._file.flush() + if self.hasmarkup: + SetConsoleTextAttribute(handle, FOREGROUND_WHITE) + + def line(self, s="", **kw): + self.write(s+"\n", **kw) + +if sys.platform == 'win32': + TerminalWriter = Win32ConsoleWriter + +class WriteFile(object): + def __init__(self, writemethod, encoding=None): + self.encoding = encoding + self._writemethod = writemethod + + def write(self, data): + if self.encoding: + data = data.encode(self.encoding) + self._writemethod(data) + + def flush(self): + return + + --- /dev/null +++ b/py/_path/gateway/channeltest.py @@ -0,0 +1,65 @@ +import threading + + +class PathServer: + + def __init__(self, channel): + self.channel = channel + self.C2P = {} + self.next_id = 0 + threading.Thread(target=self.serve).start() + + def p2c(self, path): + id = self.next_id + self.next_id += 1 + self.C2P[id] = path + return id + + def command_LIST(self, id, *args): + path = self.C2P[id] + 
answer = [(self.p2c(p), p.basename) for p in path.listdir(*args)]
+ self.channel.send(answer)
+
+ def command_DEL(self, id):
+ del self.C2P[id]
+
+ def command_GET(self, id, spec):
+ path = self.C2P[id]
+ self.channel.send(path._getbyspec(spec))
+
+ def command_READ(self, id):
+ path = self.C2P[id]
+ self.channel.send(path.read())
+
+ def command_JOIN(self, id, resultid, *args):
+ path = self.C2P[id]
+ assert resultid not in self.C2P
+ self.C2P[resultid] = path.join(*args)
+
+ def command_DIRPATH(self, id, resultid):
+ path = self.C2P[id]
+ assert resultid not in self.C2P
+ self.C2P[resultid] = path.dirpath()
+
+ def serve(self):
+ try:
+ while 1:
+ msg = self.channel.receive()
+ meth = getattr(self, 'command_' + msg[0])
+ meth(*msg[1:])
+ except EOFError:
+ pass
+
+if __name__ == '__main__':
+ import py, execnet  # execnet is the standalone execnet package providing PopenGateway
+ gw = execnet.PopenGateway()
+ channel = gw._channelfactory.new()
+ srv = PathServer(channel)
+ c = gw.remote_exec("""
+ import remotepath
+ p = remotepath.RemotePath(channel.receive(), channel.receive())
+ channel.send(len(p.listdir()))
+ """)
+ c.send(channel)
+ c.send(srv.p2c(py.path.local('/tmp')))
+ print(c.receive()) 
--- /dev/null
+++ b/py/_compat/dep_textwrap.py
@@ -0,0 +1,5 @@
+import py
+
+py.log._apiwarn("1.1", "py.compat.textwrap deprecated, use standard library version.",
+ stacklevel="apipkg")
+textwrap = py.std.textwrap 
--- /dev/null
+++ b/py/_plugin/pytest_capture.py
@@ -0,0 +1,281 @@
+"""
+configurable per-test stdout/stderr capturing mechanisms.
+
+This plugin captures stdout/stderr output for each test separately.
+In case of test failures this captured output is shown grouped
+together with the test.
+
+The plugin also provides test function arguments that help to
+assert stdout/stderr output from within your tests, see the
+`funcarg example`_.
+
+
+Capturing of input/output streams during tests
+---------------------------------------------------
+
+By default ``sys.stdout`` and ``sys.stderr`` are substituted with
+temporary streams during the execution of tests and setup/teardown code.
+During the whole testing process it will re-use the same temporary
+streams, allowing it to play well with the logging module, which easily
+takes ownership of these streams.
+
+Also, 'sys.stdin' is substituted with a file-like "null" object that
+does not return any values. This is to immediately error out
+on tests that wait on reading something from stdin.
+
+You can influence output capturing mechanisms from the command line::
+
+ py.test -s # disable all capturing
+ py.test --capture=sys # replace sys.stdout/stderr with in-mem files
+ py.test --capture=fd # point filedescriptors 1 and 2 to temp file
+
+If you set capturing values in a conftest file like this::
+
+ # conftest.py
+ option_capture = 'fd'
+
+then all tests in that directory will execute with "fd" style capturing.
+
+sys-level capturing
+------------------------------------------
+
+Capturing on 'sys' level means that ``sys.stdout`` and ``sys.stderr``
+will be replaced with in-memory files (``py.io.TextIO`` to be precise)
+that capture writes and decode non-unicode strings to a unicode object
+(using a default, usually, UTF-8, encoding).
+
+FD-level capturing and subprocesses
+------------------------------------------
+
+The ``fd`` based method means that writes going to system level files
+based on the standard file descriptors will be captured, for example
+writes such as ``os.write(1, 'hello')`` will be captured properly.
+Capturing on fd-level will include output generated from
+any subprocesses created during a test. 
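+
+An illustrative sketch (it uses the ``capfd`` funcarg described in the
+`funcarg example`_ below and assumes a POSIX ``echo`` binary is
+available) of what fd-level capturing makes visible to a test:
+
+.. sourcecode:: python
+
+    import os, subprocess
+
+    def test_fd_level_capture(capfd):
+        os.write(1, "low-level write\\n")            # bypasses sys.stdout
+        subprocess.call(["echo", "from a child process"])
+        out, err = capfd.readouterr()
+        assert "low-level write" in out
+        assert "from a child process" in out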
+ +.. _`funcarg example`: + +Example Usage of the capturing Function arguments +--------------------------------------------------- + +You can use the `capsys funcarg`_ and `capfd funcarg`_ to +capture writes to stdout and stderr streams. Using the +funcargs frees your test from having to care about setting/resetting +the old streams and also interacts well with py.test's own +per-test capturing. Here is an example test function: + +.. sourcecode:: python + + def test_myoutput(capsys): + print ("hello") + sys.stderr.write("world\\n") + out, err = capsys.readouterr() + assert out == "hello\\n" + assert err == "world\\n" + print "next" + out, err = capsys.readouterr() + assert out == "next\\n" + +The ``readouterr()`` call snapshots the output so far - +and capturing will be continued. After the test +function finishes the original streams will +be restored. If you want to capture on +the filedescriptor level you can use the ``capfd`` function +argument which offers the same interface. +""" + +import py +import os + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption('--capture', action="store", default=None, + metavar="method", type="choice", choices=['fd', 'sys', 'no'], + help="per-test capturing method: one of fd (default)|sys|no.") + group._addoption('-s', action="store_const", const="no", dest="capture", + help="shortcut for --capture=no.") + +def addouterr(rep, outerr): + repr = getattr(rep, 'longrepr', None) + if not hasattr(repr, 'addsection'): + return + for secname, content in zip(["out", "err"], outerr): + if content: + repr.addsection("Captured std%s" % secname, content.rstrip()) + +def pytest_configure(config): + config.pluginmanager.register(CaptureManager(), 'capturemanager') + +class CaptureManager: + def __init__(self): + self._method2capture = {} + + def _maketempfile(self): + f = py.std.tempfile.TemporaryFile() + newf = py.io.dupfile(f, encoding="UTF-8") + return newf + + def _makestringio(self): + return py.io.TextIO() + + def _startcapture(self, method): + if method == "fd": + return py.io.StdCaptureFD( + out=self._maketempfile(), err=self._maketempfile() + ) + elif method == "sys": + return py.io.StdCapture( + out=self._makestringio(), err=self._makestringio() + ) + else: + raise ValueError("unknown capturing method: %r" % method) + + def _getmethod(self, config, fspath): + if config.option.capture: + method = config.option.capture + else: + try: + method = config._conftest.rget("option_capture", path=fspath) + except KeyError: + method = "fd" + if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + method = "sys" + return method + + def resumecapture_item(self, item): + method = self._getmethod(item.config, item.fspath) + if not hasattr(item, 'outerr'): + item.outerr = ('', '') # we accumulate outerr on the item + return self.resumecapture(method) + + def resumecapture(self, method): + if hasattr(self, '_capturing'): + raise ValueError("cannot resume, already capturing with %r" % + (self._capturing,)) + if method != "no": + cap = self._method2capture.get(method) + if cap is None: + cap = self._startcapture(method) + self._method2capture[method] = cap + else: + cap.resume() + self._capturing = method + + def suspendcapture(self, item=None): + self.deactivate_funcargs() + if hasattr(self, '_capturing'): + method = self._capturing + if method != "no": + cap = self._method2capture[method] + outerr = cap.suspend() + else: + outerr = "", "" + del self._capturing + if item: + outerr = (item.outerr[0] + outerr[0], item.outerr[1] + outerr[1]) + return outerr + return "", "" + + def activate_funcargs(self, pyfuncitem): + if not hasattr(pyfuncitem, 'funcargs'): + return + assert not hasattr(self, '_capturing_funcargs') + l = [] + for name, obj in pyfuncitem.funcargs.items(): + if name == 'capfd' and not hasattr(os, 'dup'): + py.test.skip("capfd funcarg needs os.dup") + if name in ('capsys', 'capfd'): + obj._start() + l.append(obj) + if l: + self._capturing_funcargs = l + + def deactivate_funcargs(self): + if hasattr(self, '_capturing_funcargs'): + for capfuncarg in self._capturing_funcargs: + capfuncarg._finalize() + del self._capturing_funcargs + + def pytest_make_collect_report(self, __multicall__, collector): + method = self._getmethod(collector.config, collector.fspath) + self.resumecapture(method) + try: + rep = __multicall__.execute() + finally: + outerr = self.suspendcapture() + addouterr(rep, outerr) + return rep + + def pytest_runtest_setup(self, item): + self.resumecapture_item(item) + + def pytest_runtest_call(self, item): + self.resumecapture_item(item) + self.activate_funcargs(item) + + def pytest_runtest_teardown(self, item): + self.resumecapture_item(item) + + def pytest__teardown_final(self, __multicall__, session): + method = self._getmethod(session.config, None) + self.resumecapture(method) + try: + rep = __multicall__.execute() + finally: + outerr = self.suspendcapture() + if rep: + addouterr(rep, outerr) + return rep + + def pytest_keyboard_interrupt(self, excinfo): + if hasattr(self, '_capturing'): + self.suspendcapture() + + def pytest_runtest_makereport(self, __multicall__, item, call): + self.deactivate_funcargs() + rep = __multicall__.execute() + outerr = self.suspendcapture(item) + if not rep.passed: + addouterr(rep, outerr) + if not rep.passed or rep.when == "teardown": + outerr = ('', '') + item.outerr = outerr + return rep + +def pytest_funcarg__capsys(request): + """captures writes to sys.stdout/sys.stderr and makes + them available successively via a ``capsys.readouterr()`` method + which returns a ``(out, err)`` tuple of captured snapshot strings. + """ + return CaptureFuncarg(request, py.io.StdCapture) + +def pytest_funcarg__capfd(request): + """captures writes to file descriptors 1 and 2 and makes + snapshotted ``(out, err)`` string tuples available + via the ``capsys.readouterr()`` method. If the underlying + platform does not have ``os.dup`` (e.g. Jython) tests using + this funcarg will automatically skip. 
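+
+ A minimal illustrative sketch (assumes ``os`` is imported in the
+ test module)::
+
+     def test_raw_write(capfd):
+         os.write(1, "written to fd 1\\n")
+         out, err = capfd.readouterr()
+         assert "written to fd 1" in out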
+ """ + return CaptureFuncarg(request, py.io.StdCaptureFD) + + +class CaptureFuncarg: + def __init__(self, request, captureclass): + self._cclass = captureclass + #request.addfinalizer(self._finalize) + + def _start(self): + self.capture = self._cclass() + + def _finalize(self): + if hasattr(self, 'capture'): + self.capture.reset() + del self.capture + + def readouterr(self): + return self.capture.readouterr() + + def close(self): + self.capture.reset() + del self.capture + --- /dev/null +++ b/py/_compat/__init__.py @@ -0,0 +1,2 @@ +""" compatibility modules (taken from 2.4.4) """ + --- /dev/null +++ b/py/_code/_assertionold.py @@ -0,0 +1,558 @@ +import py +import sys, inspect +from compiler import parse, ast, pycodegen +from py._code.assertion import BuiltinAssertionError, _format_explanation + +passthroughex = (KeyboardInterrupt, SystemExit, MemoryError) + +class Failure: + def __init__(self, node): + self.exc, self.value, self.tb = sys.exc_info() + self.node = node + +class View(object): + """View base class. + + If C is a subclass of View, then C(x) creates a proxy object around + the object x. The actual class of the proxy is not C in general, + but a *subclass* of C determined by the rules below. To avoid confusion + we call view class the class of the proxy (a subclass of C, so of View) + and object class the class of x. + + Attributes and methods not found in the proxy are automatically read on x. + Other operations like setting attributes are performed on the proxy, as + determined by its view class. The object x is available from the proxy + as its __obj__ attribute. + + The view class selection is determined by the __view__ tuples and the + optional __viewkey__ method. By default, the selected view class is the + most specific subclass of C whose __view__ mentions the class of x. + If no such subclass is found, the search proceeds with the parent + object classes. For example, C(True) will first look for a subclass + of C with __view__ = (..., bool, ...) and only if it doesn't find any + look for one with __view__ = (..., int, ...), and then ..., object,... + If everything fails the class C itself is considered to be the default. + + Alternatively, the view class selection can be driven by another aspect + of the object x, instead of the class of x, by overriding __viewkey__. + See last example at the end of this module. 
+ """ + + _viewcache = {} + __view__ = () + + def __new__(rootclass, obj, *args, **kwds): + self = object.__new__(rootclass) + self.__obj__ = obj + self.__rootclass__ = rootclass + key = self.__viewkey__() + try: + self.__class__ = self._viewcache[key] + except KeyError: + self.__class__ = self._selectsubclass(key) + return self + + def __getattr__(self, attr): + # attributes not found in the normal hierarchy rooted on View + # are looked up in the object's real class + return getattr(self.__obj__, attr) + + def __viewkey__(self): + return self.__obj__.__class__ + + def __matchkey__(self, key, subclasses): + if inspect.isclass(key): + keys = inspect.getmro(key) + else: + keys = [key] + for key in keys: + result = [C for C in subclasses if key in C.__view__] + if result: + return result + return [] + + def _selectsubclass(self, key): + subclasses = list(enumsubclasses(self.__rootclass__)) + for C in subclasses: + if not isinstance(C.__view__, tuple): + C.__view__ = (C.__view__,) + choices = self.__matchkey__(key, subclasses) + if not choices: + return self.__rootclass__ + elif len(choices) == 1: + return choices[0] + else: + # combine the multiple choices + return type('?', tuple(choices), {}) + + def __repr__(self): + return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__) + + +def enumsubclasses(cls): + for subcls in cls.__subclasses__(): + for subsubclass in enumsubclasses(subcls): + yield subsubclass + yield cls + + +class Interpretable(View): + """A parse tree node with a few extra methods.""" + explanation = None + + def is_builtin(self, frame): + return False + + def eval(self, frame): + # fall-back for unknown expression nodes + try: + expr = ast.Expression(self.__obj__) + expr.filename = '' + self.__obj__.filename = '' + co = pycodegen.ExpressionCodeGenerator(expr).getCode() + result = frame.eval(co) + except passthroughex: + raise + except: + raise Failure(self) + self.result = result + self.explanation = self.explanation or frame.repr(self.result) + + def run(self, frame): + # fall-back for unknown statement nodes + try: + expr = ast.Module(None, ast.Stmt([self.__obj__])) + expr.filename = '' + co = pycodegen.ModuleCodeGenerator(expr).getCode() + frame.exec_(co) + except passthroughex: + raise + except: + raise Failure(self) + + def nice_explanation(self): + return _format_explanation(self.explanation) + + +class Name(Interpretable): + __view__ = ast.Name + + def is_local(self, frame): + co = compile('%r in locals() is not globals()' % self.name, '?', 'eval') + try: + return frame.is_true(frame.eval(co)) + except passthroughex: + raise + except: + return False + + def is_global(self, frame): + co = compile('%r in globals()' % self.name, '?', 'eval') + try: + return frame.is_true(frame.eval(co)) + except passthroughex: + raise + except: + return False + + def is_builtin(self, frame): + co = compile('%r not in locals() and %r not in globals()' % ( + self.name, self.name), '?', 'eval') + try: + return frame.is_true(frame.eval(co)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + super(Name, self).eval(frame) + if not self.is_local(frame): + self.explanation = self.name + +class Compare(Interpretable): + __view__ = ast.Compare + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + for operation, expr2 in self.ops: + if hasattr(self, 'result'): + # shortcutting in chained expressions + if not frame.is_true(self.result): + break + expr2 = Interpretable(expr2) + expr2.eval(frame) + self.explanation = "%s %s %s" % ( + 
expr.explanation, operation, expr2.explanation) + co = compile("__exprinfo_left %s __exprinfo_right" % operation, + '?', 'eval') + try: + self.result = frame.eval(co, __exprinfo_left=expr.result, + __exprinfo_right=expr2.result) + except passthroughex: + raise + except: + raise Failure(self) + expr = expr2 + +class And(Interpretable): + __view__ = ast.And + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if not frame.is_true(expr.result): + break + self.explanation = '(' + ' and '.join(explanations) + ')' + +class Or(Interpretable): + __view__ = ast.Or + + def eval(self, frame): + explanations = [] + for expr in self.nodes: + expr = Interpretable(expr) + expr.eval(frame) + explanations.append(expr.explanation) + self.result = expr.result + if frame.is_true(expr.result): + break + self.explanation = '(' + ' or '.join(explanations) + ')' + + +# == Unary operations == +keepalive = [] +for astclass, astpattern in { + ast.Not : 'not __exprinfo_expr', + ast.Invert : '(~__exprinfo_expr)', + }.items(): + + class UnaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern, + co=compile(astpattern, '?', 'eval')): + expr = Interpretable(self.expr) + expr.eval(frame) + self.explanation = astpattern.replace('__exprinfo_expr', + expr.explanation) + try: + self.result = frame.eval(co, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(UnaryArith) + +# == Binary operations == +for astclass, astpattern in { + ast.Add : '(__exprinfo_left + __exprinfo_right)', + ast.Sub : '(__exprinfo_left - __exprinfo_right)', + ast.Mul : '(__exprinfo_left * __exprinfo_right)', + ast.Div : '(__exprinfo_left / __exprinfo_right)', + ast.Mod : '(__exprinfo_left % __exprinfo_right)', + ast.Power : '(__exprinfo_left ** __exprinfo_right)', + }.items(): + + class BinaryArith(Interpretable): + __view__ = astclass + + def eval(self, frame, astpattern=astpattern, + co=compile(astpattern, '?', 'eval')): + left = Interpretable(self.left) + left.eval(frame) + right = Interpretable(self.right) + right.eval(frame) + self.explanation = (astpattern + .replace('__exprinfo_left', left .explanation) + .replace('__exprinfo_right', right.explanation)) + try: + self.result = frame.eval(co, __exprinfo_left=left.result, + __exprinfo_right=right.result) + except passthroughex: + raise + except: + raise Failure(self) + + keepalive.append(BinaryArith) + + +class CallFunc(Interpretable): + __view__ = ast.CallFunc + + def is_bool(self, frame): + co = compile('isinstance(__exprinfo_value, bool)', '?', 'eval') + try: + return frame.is_true(frame.eval(co, __exprinfo_value=self.result)) + except passthroughex: + raise + except: + return False + + def eval(self, frame): + node = Interpretable(self.node) + node.eval(frame) + explanations = [] + vars = {'__exprinfo_fn': node.result} + source = '__exprinfo_fn(' + for a in self.args: + if isinstance(a, ast.Keyword): + keyword = a.name + a = a.expr + else: + keyword = None + a = Interpretable(a) + a.eval(frame) + argname = '__exprinfo_%d' % len(vars) + vars[argname] = a.result + if keyword is None: + source += argname + ',' + explanations.append(a.explanation) + else: + source += '%s=%s,' % (keyword, argname) + explanations.append('%s=%s' % (keyword, a.explanation)) + if self.star_args: + star_args = Interpretable(self.star_args) + star_args.eval(frame) + argname = 
'__exprinfo_star' + vars[argname] = star_args.result + source += '*' + argname + ',' + explanations.append('*' + star_args.explanation) + if self.dstar_args: + dstar_args = Interpretable(self.dstar_args) + dstar_args.eval(frame) + argname = '__exprinfo_kwds' + vars[argname] = dstar_args.result + source += '**' + argname + ',' + explanations.append('**' + dstar_args.explanation) + self.explanation = "%s(%s)" % ( + node.explanation, ', '.join(explanations)) + if source.endswith(','): + source = source[:-1] + source += ')' + co = compile(source, '?', 'eval') + try: + self.result = frame.eval(co, **vars) + except passthroughex: + raise + except: + raise Failure(self) + if not node.is_builtin(frame) or not self.is_bool(frame): + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +class Getattr(Interpretable): + __view__ = ast.Getattr + + def eval(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + co = compile('__exprinfo_expr.%s' % self.attrname, '?', 'eval') + try: + self.result = frame.eval(co, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + self.explanation = '%s.%s' % (expr.explanation, self.attrname) + # if the attribute comes from the instance, its value is interesting + co = compile('hasattr(__exprinfo_expr, "__dict__") and ' + '%r in __exprinfo_expr.__dict__' % self.attrname, + '?', 'eval') + try: + from_instance = frame.is_true( + frame.eval(co, __exprinfo_expr=expr.result)) + except passthroughex: + raise + except: + from_instance = True + if from_instance: + r = frame.repr(self.result) + self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation) + +# == Re-interpretation of full statements == + +class Assert(Interpretable): + __view__ = ast.Assert + + def run(self, frame): + test = Interpretable(self.test) + test.eval(frame) + # simplify 'assert False where False = ...' + if (test.explanation.startswith('False\n{False = ') and + test.explanation.endswith('\n}')): + test.explanation = test.explanation[15:-2] + # print the result as 'assert ' + self.result = test.result + self.explanation = 'assert ' + test.explanation + if not frame.is_true(test.result): + try: + raise BuiltinAssertionError + except passthroughex: + raise + except: + raise Failure(self) + +class Assign(Interpretable): + __view__ = ast.Assign + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = '... 
= ' + expr.explanation + # fall-back-run the rest of the assignment + ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr')) + mod = ast.Module(None, ast.Stmt([ass])) + mod.filename = '' + co = pycodegen.ModuleCodeGenerator(mod).getCode() + try: + frame.exec_(co, __exprinfo_expr=expr.result) + except passthroughex: + raise + except: + raise Failure(self) + +class Discard(Interpretable): + __view__ = ast.Discard + + def run(self, frame): + expr = Interpretable(self.expr) + expr.eval(frame) + self.result = expr.result + self.explanation = expr.explanation + +class Stmt(Interpretable): + __view__ = ast.Stmt + + def run(self, frame): + for stmt in self.nodes: + stmt = Interpretable(stmt) + stmt.run(frame) + + +def report_failure(e): + explanation = e.node.nice_explanation() + if explanation: + explanation = ", in: " + explanation + else: + explanation = "" + sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation)) + +def check(s, frame=None): + if frame is None: + import sys + frame = sys._getframe(1) + frame = py.code.Frame(frame) + expr = parse(s, 'eval') + assert isinstance(expr, ast.Expression) + node = Interpretable(expr.node) + try: + node.eval(frame) + except passthroughex: + raise + except Failure: + e = sys.exc_info()[1] + report_failure(e) + else: + if not frame.is_true(node.result): + sys.stderr.write("assertion failed: %s\n" % node.nice_explanation()) + + +########################################################### +# API / Entry points +# ######################################################### + +def interpret(source, frame, should_fail=False): + module = Interpretable(parse(source, 'exec').node) + #print "got module", module + if isinstance(frame, py.std.types.FrameType): + frame = py.code.Frame(frame) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + return getfailure(e) + except passthroughex: + raise + except: + import traceback + traceback.print_exc() + if should_fail: + return ("(assertion failed, but when it was re-run for " + "printing intermediate values, it did not fail. 
Suggestions: " + "compute assert expression before the assert or use --nomagic)") + else: + return None + +def getmsg(excinfo): + if isinstance(excinfo, tuple): + excinfo = py.code.ExceptionInfo(excinfo) + #frame, line = gettbline(tb) + #frame = py.code.Frame(frame) + #return interpret(line, frame) + + tb = excinfo.traceback[-1] + source = str(tb.statement).strip() + x = interpret(source, tb.frame, should_fail=True) + if not isinstance(x, str): + raise TypeError("interpret returned non-string %r" % (x,)) + return x + +def getfailure(e): + explanation = e.node.nice_explanation() + if str(e.value): + lines = explanation.split('\n') + lines[0] += " << %s" % (e.value,) + explanation = '\n'.join(lines) + text = "%s: %s" % (e.exc.__name__, explanation) + if text.startswith('AssertionError: assert '): + text = text[16:] + return text + +def run(s, frame=None): + if frame is None: + import sys + frame = sys._getframe(1) + frame = py.code.Frame(frame) + module = Interpretable(parse(s, 'exec').node) + try: + module.run(frame) + except Failure: + e = sys.exc_info()[1] + report_failure(e) + + +if __name__ == '__main__': + # example: + def f(): + return 5 + def g(): + return 3 + def h(x): + return 'never' + check("f() * g() == 5") + check("not f()") + check("not (f() and g() or 0)") + check("f() == g()") + i = 4 + check("i == f()") + check("len(f()) == 0") + check("isinstance(2+3+4, float)") + + run("x = i") + check("x == 5") + + run("assert not f(), 'oops'") + run("a, b, c = 1, 2") + run("a, b, c = f()") + + check("max([f(),g()]) == 4") + check("'hello'[g()] == 'h'") + run("'guk%d' % h(f())") --- /dev/null +++ b/py/_compat/dep_subprocess.py @@ -0,0 +1,5 @@ + +import py +py.log._apiwarn("1.1", "py.compat.subprocess deprecated, use standard library version.", +stacklevel="apipkg") +subprocess = py.std.subprocess --- /dev/null +++ b/py/_compat/dep_optparse.py @@ -0,0 +1,4 @@ +import py +py.log._apiwarn("1.1", "py.compat.optparse deprecated, use standard library version.", stacklevel="apipkg") + +optparse = py.std.optparse --- /dev/null +++ b/py/_path/__init__.py @@ -0,0 +1,1 @@ +""" unified file system api """ --- /dev/null +++ b/py/_code/_assertionnew.py @@ -0,0 +1,337 @@ +""" +Like _assertion.py but using builtin AST. It should replace _assertion.py +eventually. 
+""" + +import sys +import ast + +import py +from py._code.assertion import _format_explanation, BuiltinAssertionError + + +if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): + # See http://bugs.jython.org/issue1497 + _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", + "ListComp", "GeneratorExp", "Yield", "Compare", "Call", + "Repr", "Num", "Str", "Attribute", "Subscript", "Name", + "List", "Tuple") + _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign", + "AugAssign", "Print", "For", "While", "If", "With", "Raise", + "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom", + "Exec", "Global", "Expr", "Pass", "Break", "Continue") + _expr_nodes = set(getattr(ast, name) for name in _exprs) + _stmt_nodes = set(getattr(ast, name) for name in _stmts) + def _is_ast_expr(node): + return node.__class__ in _expr_nodes + def _is_ast_stmt(node): + return node.__class__ in _stmt_nodes +else: + def _is_ast_expr(node): + return isinstance(node, ast.expr) + def _is_ast_stmt(node): + return isinstance(node, ast.stmt) + + +class Failure(Exception): + """Error found while interpreting AST.""" + + def __init__(self, explanation=""): + self.cause = sys.exc_info() + self.explanation = explanation + + +def interpret(source, frame, should_fail=False): + mod = ast.parse(source) + visitor = DebugInterpreter(frame) + try: + visitor.visit(mod) + except Failure: + failure = sys.exc_info()[1] + return getfailure(failure) + if should_fail: + return ("(assertion failed, but when it was re-run for " + "printing intermediate values, it did not fail. Suggestions: " + "compute assert expression before the assert or use --no-assert)") + +def run(offending_line, frame=None): + if frame is None: + frame = py.code.Frame(sys._getframe(1)) + return interpret(offending_line, frame) + +def getfailure(failure): + explanation = _format_explanation(failure.explanation) + value = failure.cause[1] + if str(value): + lines = explanation.splitlines() + if not lines: + lines.append("") + lines[0] += " << %s" % (value,) + explanation = "\n".join(lines) + text = "%s: %s" % (failure.cause[0].__name__, explanation) + if text.startswith("AssertionError: assert "): + text = text[16:] + return text + + +operator_map = { + ast.BitOr : "|", + ast.BitXor : "^", + ast.BitAnd : "&", + ast.LShift : "<<", + ast.RShift : ">>", + ast.Add : "+", + ast.Sub : "-", + ast.Mult : "*", + ast.Div : "/", + ast.FloorDiv : "//", + ast.Mod : "%", + ast.Eq : "==", + ast.NotEq : "!=", + ast.Lt : "<", + ast.LtE : "<=", + ast.Gt : ">", + ast.GtE : ">=", + ast.Is : "is", + ast.IsNot : "is not", + ast.In : "in", + ast.NotIn : "not in" +} + +unary_map = { + ast.Not : "not %s", + ast.Invert : "~%s", + ast.USub : "-%s", + ast.UAdd : "+%s" +} + + +class DebugInterpreter(ast.NodeVisitor): + """Interpret AST nodes to gleam useful debugging information.""" + + def __init__(self, frame): + self.frame = frame + + def generic_visit(self, node): + # Fallback when we don't have a special implementation. 
+ if _is_ast_expr(node): + mod = ast.Expression(node) + co = self._compile(mod) + try: + result = self.frame.eval(co) + except Exception: + raise Failure() + explanation = self.frame.repr(result) + return explanation, result + elif _is_ast_stmt(node): + mod = ast.Module([node]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co) + except Exception: + raise Failure() + return None, None + else: + raise AssertionError("can't handle %s" %(node,)) + + def _compile(self, source, mode="eval"): + return compile(source, "", mode) + + def visit_Expr(self, expr): + return self.visit(expr.value) + + def visit_Module(self, mod): + for stmt in mod.body: + self.visit(stmt) + + def visit_Name(self, name): + explanation, result = self.generic_visit(name) + # See if the name is local. + source = "%r in locals() is not globals()" % (name.id,) + co = self._compile(source) + try: + local = self.frame.eval(co) + except Exception: + # have to assume it isn't + local = False + if not local: + return name.id, result + return explanation, result + + def visit_Compare(self, comp): + left = comp.left + left_explanation, left_result = self.visit(left) + got_result = False + for op, next_op in zip(comp.ops, comp.comparators): + if got_result and not result: + break + next_explanation, next_result = self.visit(next_op) + op_symbol = operator_map[op.__class__] + explanation = "%s %s %s" % (left_explanation, op_symbol, + next_explanation) + source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_left=left_result, + __exprinfo_right=next_result) + except Exception: + raise Failure(explanation) + else: + got_result = True + left_explanation, left_result = next_explanation, next_result + return explanation, result + + def visit_BoolOp(self, boolop): + is_or = isinstance(boolop.op, ast.Or) + explanations = [] + for operand in boolop.values: + explanation, result = self.visit(operand) + explanations.append(explanation) + if result == is_or: + break + name = is_or and " or " or " and " + explanation = "(" + name.join(explanations) + ")" + return explanation, result + + def visit_UnaryOp(self, unary): + pattern = unary_map[unary.op.__class__] + operand_explanation, operand_result = self.visit(unary.operand) + explanation = pattern % (operand_explanation,) + co = self._compile(pattern % ("__exprinfo_expr",)) + try: + result = self.frame.eval(co, __exprinfo_expr=operand_result) + except Exception: + raise Failure(explanation) + return explanation, result + + def visit_BinOp(self, binop): + left_explanation, left_result = self.visit(binop.left) + right_explanation, right_result = self.visit(binop.right) + symbol = operator_map[binop.op.__class__] + explanation = "(%s %s %s)" % (left_explanation, symbol, + right_explanation) + source = "__exprinfo_left %s __exprinfo_right" % (symbol,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_left=left_result, + __exprinfo_right=right_result) + except Exception: + raise Failure(explanation) + return explanation, result + + def visit_Call(self, call): + func_explanation, func = self.visit(call.func) + arg_explanations = [] + ns = {"__exprinfo_func" : func} + arguments = [] + for arg in call.args: + arg_explanation, arg_result = self.visit(arg) + arg_name = "__exprinfo_%s" % (len(ns),) + ns[arg_name] = arg_result + arguments.append(arg_name) + arg_explanations.append(arg_explanation) + for keyword in call.keywords: + arg_explanation, arg_result = 
self.visit(keyword.value) + arg_name = "__exprinfo_%s" % (len(ns),) + ns[arg_name] = arg_result + keyword_source = "%s=%%s" % (keyword.arg) + arguments.append(keyword_source % (arg_name,)) + arg_explanations.append(keyword_source % (arg_explanation,)) + if call.starargs: + arg_explanation, arg_result = self.visit(call.starargs) + arg_name = "__exprinfo_star" + ns[arg_name] = arg_result + arguments.append("*%s" % (arg_name,)) + arg_explanations.append("*%s" % (arg_explanation,)) + if call.kwargs: + arg_explanation, arg_result = self.visit(call.kwargs) + arg_name = "__exprinfo_kwds" + ns[arg_name] = arg_result + arguments.append("**%s" % (arg_name,)) + arg_explanations.append("**%s" % (arg_explanation,)) + args_explained = ", ".join(arg_explanations) + explanation = "%s(%s)" % (func_explanation, args_explained) + args = ", ".join(arguments) + source = "__exprinfo_func(%s)" % (args,) + co = self._compile(source) + try: + result = self.frame.eval(co, **ns) + except Exception: + raise Failure(explanation) + # Only show result explanation if it's not a builtin call or returns a + # bool. + if not isinstance(call.func, ast.Name) or \ + not self._is_builtin_name(call.func): + source = "isinstance(__exprinfo_value, bool)" + co = self._compile(source) + try: + is_bool = self.frame.eval(co, __exprinfo_value=result) + except Exception: + is_bool = False + if not is_bool: + pattern = "%s\n{%s = %s\n}" + rep = self.frame.repr(result) + explanation = pattern % (rep, rep, explanation) + return explanation, result + + def _is_builtin_name(self, name): + pattern = "%r not in globals() and %r not in locals()" + source = pattern % (name.id, name.id) + co = self._compile(source) + try: + return self.frame.eval(co) + except Exception: + return False + + def visit_Attribute(self, attr): + if not isinstance(attr.ctx, ast.Load): + return self.generic_visit(attr) + source_explanation, source_result = self.visit(attr.value) + explanation = "%s.%s" % (source_explanation, attr.attr) + source = "__exprinfo_expr.%s" % (attr.attr,) + co = self._compile(source) + try: + result = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + raise Failure(explanation) + # Check if the attr is from an instance. + source = "%r in getattr(__exprinfo_expr, '__dict__', {})" + source = source % (attr.attr,) + co = self._compile(source) + try: + from_instance = self.frame.eval(co, __exprinfo_expr=source_result) + except Exception: + from_instance = True + if from_instance: + rep = self.frame.repr(result) + pattern = "%s\n{%s = %s\n}" + explanation = pattern % (rep, rep, explanation) + return explanation, result + + def visit_Assert(self, assrt): + test_explanation, test_result = self.visit(assrt.test) + if test_explanation.startswith("False\n{False =") and \ + test_explanation.endswith("\n"): + test_explanation = test_explanation[15:-2] + explanation = "assert %s" % (test_explanation,) + if not test_result: + try: + raise BuiltinAssertionError + except Exception: + raise Failure(explanation) + return explanation, test_result + + def visit_Assign(self, assign): + value_explanation, value_result = self.visit(assign.value) + explanation = "... 
= %s" % (value_explanation,) + name = ast.Name("__exprinfo_expr", ast.Load(), assign.value.lineno, + assign.value.col_offset) + new_assign = ast.Assign(assign.targets, name, assign.lineno, + assign.col_offset) + mod = ast.Module([new_assign]) + co = self._compile(mod, "exec") + try: + self.frame.exec_(co, __exprinfo_expr=value_result) + except Exception: + raise Failure(explanation) + return explanation, value_result --- /dev/null +++ b/py/_plugin/pytest_hooklog.py @@ -0,0 +1,33 @@ +""" log invocations of extension hooks to a file. """ +import py + +def pytest_addoption(parser): + parser.addoption("--hooklog", dest="hooklog", default=None, + help="write hook calls to the given file.") + +def pytest_configure(config): + hooklog = config.getvalue("hooklog") + if hooklog: + config._hooklogfile = open(hooklog, 'w') + config._hooklog_oldperformcall = config.hook._performcall + config.hook._performcall = (lambda name, multicall: + logged_call(name=name, multicall=multicall, config=config)) + +def logged_call(name, multicall, config): + f = config._hooklogfile + f.write("%s(**%s)\n" % (name, multicall.kwargs)) + try: + res = config._hooklog_oldperformcall(name=name, multicall=multicall) + except: + f.write("-> exception") + raise + f.write("-> %r" % (res,)) + return res + +def pytest_unconfigure(config): + try: + del config.hook.__dict__['_performcall'] + except KeyError: + pass + else: + config._hooklogfile.close() --- /dev/null +++ b/py/_log/log.py @@ -0,0 +1,186 @@ +""" +basic logging functionality based on a producer/consumer scheme. + +XXX implement this API: (maybe put it into slogger.py?) + + log = Logger( + info=py.log.STDOUT, + debug=py.log.STDOUT, + command=None) + log.info("hello", "world") + log.command("hello", "world") + + log = Logger(info=Logger(something=...), + debug=py.log.STDOUT, + command=None) +""" +import py, sys + +class Message(object): + def __init__(self, keywords, args): + self.keywords = keywords + self.args = args + + def content(self): + return " ".join(map(str, self.args)) + + def prefix(self): + return "[%s] " % (":".join(self.keywords)) + + def __str__(self): + return self.prefix() + self.content() + + +class Producer(object): + """ (deprecated) Log producer API which sends messages to be logged + to a 'consumer' object, which then prints them to stdout, + stderr, files, etc. Used extensively by PyPy-1.1. + """ + + Message = Message # to allow later customization + keywords2consumer = {} + + def __init__(self, keywords, keywordmapper=None, **kw): + if hasattr(keywords, 'split'): + keywords = tuple(keywords.split()) + self._keywords = keywords + if keywordmapper is None: + keywordmapper = default_keywordmapper + self._keywordmapper = keywordmapper + + def __repr__(self): + return "" % ":".join(self._keywords) + + def __getattr__(self, name): + if '_' in name: + raise AttributeError(name) + producer = self.__class__(self._keywords + (name,)) + setattr(self, name, producer) + return producer + + def __call__(self, *args): + """ write a message to the appropriate consumer(s) """ + func = self._keywordmapper.getconsumer(self._keywords) + if func is not None: + func(self.Message(self._keywords, args)) + +class KeywordMapper: + def __init__(self): + self.keywords2consumer = {} + + def getstate(self): + return self.keywords2consumer.copy() + def setstate(self, state): + self.keywords2consumer.clear() + self.keywords2consumer.update(state) + + def getconsumer(self, keywords): + """ return a consumer matching the given keywords. 
+ + tries to find the most suitable consumer by walking, starting from + the back, the list of keywords, the first consumer matching a + keyword is returned (falling back to py.log.default) + """ + for i in range(len(keywords), 0, -1): + try: + return self.keywords2consumer[keywords[:i]] + except KeyError: + continue + return self.keywords2consumer.get('default', default_consumer) + + def setconsumer(self, keywords, consumer): + """ set a consumer for a set of keywords. """ + # normalize to tuples + if isinstance(keywords, str): + keywords = tuple(filter(None, keywords.split())) + elif hasattr(keywords, '_keywords'): + keywords = keywords._keywords + elif not isinstance(keywords, tuple): + raise TypeError("key %r is not a string or tuple" % (keywords,)) + if consumer is not None and not py.builtin.callable(consumer): + if not hasattr(consumer, 'write'): + raise TypeError( + "%r should be None, callable or file-like" % (consumer,)) + consumer = File(consumer) + self.keywords2consumer[keywords] = consumer + +def default_consumer(msg): + """ the default consumer, prints the message to stdout (using 'print') """ + sys.stderr.write(str(msg)+"\n") + +default_keywordmapper = KeywordMapper() + +def setconsumer(keywords, consumer): + default_keywordmapper.setconsumer(keywords, consumer) + +def setstate(state): + default_keywordmapper.setstate(state) +def getstate(): + return default_keywordmapper.getstate() + +# +# Consumers +# + +class File(object): + """ log consumer wrapping a file(-like) object """ + def __init__(self, f): + assert hasattr(f, 'write') + #assert isinstance(f, file) or not hasattr(f, 'open') + self._file = f + + def __call__(self, msg): + """ write a message to the log """ + self._file.write(str(msg) + "\n") + if hasattr(self._file, 'flush'): + self._file.flush() + +class Path(object): + """ log consumer that opens and writes to a Path """ + def __init__(self, filename, append=False, + delayed_create=False, buffering=False): + self._append = append + self._filename = str(filename) + self._buffering = buffering + if not delayed_create: + self._openfile() + + def _openfile(self): + mode = self._append and 'a' or 'w' + f = open(self._filename, mode) + self._file = f + + def __call__(self, msg): + """ write a message to the log """ + if not hasattr(self, "_file"): + self._openfile() + self._file.write(str(msg) + "\n") + if not self._buffering: + self._file.flush() + +def STDOUT(msg): + """ consumer that writes to sys.stdout """ + sys.stdout.write(str(msg)+"\n") + +def STDERR(msg): + """ consumer that writes to sys.stderr """ + sys.stderr.write(str(msg)+"\n") + +class Syslog: + """ consumer that writes to the syslog daemon """ + + def __init__(self, priority = None): + if priority is None: + priority = self.LOG_INFO + self.priority = priority + + def __call__(self, msg): + """ write a message to the log """ + py.std.syslog.syslog(self.priority, str(msg)) + +for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split(): + _prio = "LOG_" + _prio + try: + setattr(Syslog, _prio, getattr(py.std.syslog, _prio)) + except AttributeError: + pass --- /dev/null +++ b/py/_builtin.py @@ -0,0 +1,203 @@ +import sys + +try: + reversed = reversed +except NameError: + def reversed(sequence): + """reversed(sequence) -> reverse iterator over values of the sequence + + Return a reverse iterator + """ + if hasattr(sequence, '__reversed__'): + return sequence.__reversed__() + if not hasattr(sequence, '__getitem__'): + raise TypeError("argument to reversed() must be a sequence") + return 
reversed_iterator(sequence) + + class reversed_iterator(object): + + def __init__(self, seq): + self.seq = seq + self.remaining = len(seq) + + def __iter__(self): + return self + + def next(self): + i = self.remaining + if i > 0: + i -= 1 + item = self.seq[i] + self.remaining = i + return item + raise StopIteration + + def __length_hint__(self): + return self.remaining + +try: + sorted = sorted +except NameError: + builtin_cmp = cmp # need to use cmp as keyword arg + + def sorted(iterable, cmp=None, key=None, reverse=0): + use_cmp = None + if key is not None: + if cmp is None: + def use_cmp(x, y): + return builtin_cmp(x[0], y[0]) + else: + def use_cmp(x, y): + return cmp(x[0], y[0]) + l = [(key(element), element) for element in iterable] + else: + if cmp is not None: + use_cmp = cmp + l = list(iterable) + if use_cmp is not None: + l.sort(use_cmp) + else: + l.sort() + if reverse: + l.reverse() + if key is not None: + return [element for (_, element) in l] + return l + +try: + set, frozenset = set, frozenset +except NameError: + from sets import set, frozenset + +# pass through +enumerate = enumerate + +try: + BaseException = BaseException +except NameError: + BaseException = Exception + +try: + GeneratorExit = GeneratorExit +except NameError: + class GeneratorExit(Exception): + """ This exception is never raised, it is there to make it possible to + write code compatible with CPython 2.5 even in lower CPython + versions.""" + pass + GeneratorExit.__module__ = 'exceptions' + +if sys.version_info >= (3, 0): + exec ("print_ = print ; exec_=exec") + import builtins + + # some backward compatibility helpers + _basestring = str + def _totext(obj, encoding): + if isinstance(obj, bytes): + obj = obj.decode(encoding) + elif not isinstance(obj, str): + obj = str(obj) + return obj + + def _isbytes(x): + return isinstance(x, bytes) + def _istext(x): + return isinstance(x, str) + + def _getimself(function): + return getattr(function, '__self__', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def execfile(fn, globs=None, locs=None): + if globs is None: + back = sys._getframe(1) + globs = back.f_globals + locs = back.f_locals + del back + elif locs is None: + locs = globs + fp = open(fn, "rb") + try: + source = fp.read() + finally: + fp.close() + co = compile(source, fn, "exec", dont_inherit=True) + exec_(co, globs, locs) + + def callable(obj): + return hasattr(obj, "__call__") + +else: + import __builtin__ as builtins + _totext = unicode + _basestring = basestring + execfile = execfile + callable = callable + def _isbytes(x): + return isinstance(x, str) + def _istext(x): + return isinstance(x, unicode) + + def _getimself(function): + return getattr(function, 'im_self', None) + + def _getfuncdict(function): + return getattr(function, "__dict__", None) + + def print_(*args, **kwargs): + """ minimal backport of py3k print statement. """ + sep = ' ' + if 'sep' in kwargs: + sep = kwargs.pop('sep') + end = '\n' + if 'end' in kwargs: + end = kwargs.pop('end') + file = 'file' in kwargs and kwargs.pop('file') or sys.stdout + if kwargs: + args = ", ".join([str(x) for x in kwargs]) + raise TypeError("invalid keyword arguments: %s" % args) + at_start = True + for x in args: + if not at_start: + file.write(sep) + file.write(str(x)) + at_start = False + file.write(end) + + def exec_(obj, globals=None, locals=None): + """ minimal backport of py3k exec statement. 
""" + if globals is None: + frame = sys._getframe(1) + globals = frame.f_globals + if locals is None: + locals = frame.f_locals + elif locals is None: + locals = globals + exec2(obj, globals, locals) + +if sys.version_info >= (3,0): + exec (""" +def _reraise(cls, val, tb): + assert hasattr(val, '__traceback__') + raise val +""") +else: + exec (""" +def _reraise(cls, val, tb): + raise cls, val, tb +def exec2(obj, globals, locals): + exec obj in globals, locals +""") + +def _tryimport(*names): + """ return the first successfully imported module. """ + assert names + for name in names: + try: + return __import__(name, None, None, '__doc__') + except ImportError: + excinfo = sys.exc_info() + _reraise(*excinfo) --- /dev/null +++ b/py/_code/oldmagic.py @@ -0,0 +1,62 @@ +""" deprecated module for turning on/off some features. """ + +import py + +from py.builtin import builtins as cpy_builtin + +def invoke(assertion=False, compile=False): + """ (deprecated) invoke magic, currently you can specify: + + assertion patches the builtin AssertionError to try to give + more meaningful AssertionErrors, which by means + of deploying a mini-interpreter constructs + a useful error message. + """ + py.log._apiwarn("1.1", + "py.magic.invoke() is deprecated, use py.code.patch_builtins()", + stacklevel=2, + ) + py.code.patch_builtins(assertion=assertion, compile=compile) + +def revoke(assertion=False, compile=False): + """ (deprecated) revoke previously invoked magic (see invoke()).""" + py.log._apiwarn("1.1", + "py.magic.revoke() is deprecated, use py.code.unpatch_builtins()", + stacklevel=2, + ) + py.code.unpatch_builtins(assertion=assertion, compile=compile) + +patched = {} + +def patch(namespace, name, value): + """ (deprecated) rebind the 'name' on the 'namespace' to the 'value', + possibly and remember the original value. Multiple + invocations to the same namespace/name pair will + remember a list of old values. + """ + py.log._apiwarn("1.1", + "py.magic.patch() is deprecated, in tests use monkeypatch funcarg.", + stacklevel=2, + ) + nref = (namespace, name) + orig = getattr(namespace, name) + patched.setdefault(nref, []).append(orig) + setattr(namespace, name, value) + return orig + +def revert(namespace, name): + """ (deprecated) revert to the orginal value the last patch modified. + Raise ValueError if no such original value exists. + """ + py.log._apiwarn("1.1", + "py.magic.revert() is deprecated, in tests use monkeypatch funcarg.", + stacklevel=2, + ) + nref = (namespace, name) + if nref not in patched or not patched[nref]: + raise ValueError("No original value stored for %s.%s" % nref) + current = getattr(namespace, name) + orig = patched[nref].pop() + setattr(namespace, name, orig) + return current + --- /dev/null +++ b/py/_cmdline/pywhich.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +"""\ +py.which [name] + +print the location of the given python module or package name +""" + +import sys + +def main(): + name = sys.argv[1] + try: + mod = __import__(name) + except ImportError: + sys.stderr.write("could not import: " + name + "\n") + else: + try: + location = mod.__file__ + except AttributeError: + sys.stderr.write("module (has no __file__): " + str(mod)) + else: + print(location) --- /dev/null +++ b/py/_path/svnwc.py @@ -0,0 +1,1231 @@ +""" +svn-Command based Implementation of a Subversion WorkingCopy Path. + + SvnWCCommandPath is the main class. 
+ +""" + +import os, sys, time, re, calendar +import py +import subprocess +from py._path import common + +#----------------------------------------------------------- +# Caching latest repository revision and repo-paths +# (getting them is slow with the current implementations) +# +# XXX make mt-safe +#----------------------------------------------------------- + +class cache: + proplist = {} + info = {} + entries = {} + prop = {} + +class RepoEntry: + def __init__(self, url, rev, timestamp): + self.url = url + self.rev = rev + self.timestamp = timestamp + + def __str__(self): + return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp) + +class RepoCache: + """ The Repocache manages discovered repository paths + and their revisions. If inside a timeout the cache + will even return the revision of the root. + """ + timeout = 20 # seconds after which we forget that we know the last revision + + def __init__(self): + self.repos = [] + + def clear(self): + self.repos = [] + + def put(self, url, rev, timestamp=None): + if rev is None: + return + if timestamp is None: + timestamp = time.time() + + for entry in self.repos: + if url == entry.url: + entry.timestamp = timestamp + entry.rev = rev + #print "set repo", entry + break + else: + entry = RepoEntry(url, rev, timestamp) + self.repos.append(entry) + #print "appended repo", entry + + def get(self, url): + now = time.time() + for entry in self.repos: + if url.startswith(entry.url): + if now < entry.timestamp + self.timeout: + #print "returning immediate Etrny", entry + return entry.url, entry.rev + return entry.url, -1 + return url, -1 + +repositories = RepoCache() + + +# svn support code + +ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested +if sys.platform == "win32": + ALLOWED_CHARS += ":" +ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:' + +def _getsvnversion(ver=[]): + try: + return ver[0] + except IndexError: + v = py.process.cmdexec("svn -q --version") + v.strip() + v = '.'.join(v.split('.')[:2]) + ver.append(v) + return v + +def _escape_helper(text): + text = str(text) + if py.std.sys.platform != 'win32': + text = str(text).replace('$', '\\$') + return text + +def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS): + for c in str(text): + if c.isalnum(): + continue + if c in allowed_chars: + continue + return True + return False + +def checkbadchars(url): + # (hpk) not quite sure about the exact purpose, guido w.? + proto, uri = url.split("://", 1) + if proto != "file": + host, uripath = uri.split('/', 1) + # only check for bad chars in the non-protocol parts + if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \ + or _check_for_bad_chars(uripath, ALLOWED_CHARS)): + raise ValueError("bad char in %r" % (url, )) + + +#_______________________________________________________________ + +class SvnPathBase(common.PathBase): + """ Base implementation for SvnPath implementations. """ + sep = '/' + + def _geturl(self): + return self.strpath + url = property(_geturl, None, None, "url of this svn-path.") + + def __str__(self): + """ return a string representation (including rev-number) """ + return self.strpath + + def __hash__(self): + return hash(self.strpath) + + def new(self, **kw): + """ create a modified version of this path. A 'rev' argument + indicates a new revision. 
+ the following keyword arguments modify various path parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + obj = object.__new__(self.__class__) + obj.rev = kw.get('rev', self.rev) + obj.auth = kw.get('auth', self.auth) + dirname, basename, purebasename, ext = self._getbyspec( + "dirname,basename,purebasename,ext") + if 'basename' in kw: + if 'purebasename' in kw or 'ext' in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault('purebasename', purebasename) + ext = kw.setdefault('ext', ext) + if ext and not ext.startswith('.'): + ext = '.' + ext + kw['basename'] = pb + ext + + kw.setdefault('dirname', dirname) + kw.setdefault('sep', self.sep) + if kw['basename']: + obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw + else: + obj.strpath = "%(dirname)s" % kw + return obj + + def _getbyspec(self, spec): + """ get specified parts of the path. 'arg' is a string + with comma separated path parts. The parts are returned + in exactly the order of the specification. + + you may specify the following parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + res = [] + parts = self.strpath.split(self.sep) + for name in spec.split(','): + name = name.strip() + if name == 'dirname': + res.append(self.sep.join(parts[:-1])) + elif name == 'basename': + res.append(parts[-1]) + else: + basename = parts[-1] + i = basename.rfind('.') + if i == -1: + purebasename, ext = basename, '' + else: + purebasename, ext = basename[:i], basename[i:] + if name == 'purebasename': + res.append(purebasename) + elif name == 'ext': + res.append(ext) + else: + raise NameError("Don't know part %r" % name) + return res + + def __eq__(self, other): + """ return true if path and rev attributes each match """ + return (str(self) == str(other) and + (self.rev == other.rev or self.rev == other.rev)) + + def __ne__(self, other): + return not self == other + + def join(self, *args): + """ return a new Path (with the same revision) which is composed + of the self Path followed by 'args' path components. + """ + if not args: + return self + + args = tuple([arg.strip(self.sep) for arg in args]) + parts = (self.strpath, ) + args + newpath = self.__class__(self.sep.join(parts), self.rev, self.auth) + return newpath + + def propget(self, name): + """ return the content of the given property. """ + value = self._propget(name) + return value + + def proplist(self): + """ list all property names. """ + content = self._proplist() + return content + + def size(self): + """ Return the size of the file content of the Path. """ + return self.info().size + + def mtime(self): + """ Return the last modification time of the file. """ + return self.info().mtime + + # shared help methods + + def _escape(self, cmd): + return _escape_helper(cmd) + + + #def _childmaxrev(self): + # """ return maximum revision number of childs (or self.rev if no childs) """ + # rev = self.rev + # for name, info in self._listdir_nameinfo(): + # rev = max(rev, info.created_rev) + # return rev + + #def _getlatestrevision(self): + # """ return latest repo-revision for this path. 
""" + # url = self.strpath + # path = self.__class__(url, None) + # + # # we need a long walk to find the root-repo and revision + # while 1: + # try: + # rev = max(rev, path._childmaxrev()) + # previous = path + # path = path.dirpath() + # except (IOError, process.cmdexec.Error): + # break + # if rev is None: + # raise IOError, "could not determine newest repo revision for %s" % self + # return rev + + class Checkers(common.Checkers): + def dir(self): + try: + return self.path.info().kind == 'dir' + except py.error.Error: + return self._listdirworks() + + def _listdirworks(self): + try: + self.path.listdir() + except py.error.ENOENT: + return False + else: + return True + + def file(self): + try: + return self.path.info().kind == 'file' + except py.error.ENOENT: + return False + + def exists(self): + try: + return self.path.info() + except py.error.ENOENT: + return self._listdirworks() + +def parse_apr_time(timestr): + i = timestr.rfind('.') + if i == -1: + raise ValueError("could not parse %s" % timestr) + timestr = timestr[:i] + parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S") + return time.mktime(parsedtime) + +class PropListDict(dict): + """ a Dictionary which fetches values (InfoSvnCommand instances) lazily""" + def __init__(self, path, keynames): + dict.__init__(self, [(x, None) for x in keynames]) + self.path = path + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + if value is None: + value = self.path.propget(key) + dict.__setitem__(self, key, value) + return value + +def fixlocale(): + if sys.platform != 'win32': + return 'LC_ALL=C ' + return '' + +# some nasty chunk of code to solve path and url conversion and quoting issues +ILLEGAL_CHARS = '* | \ / : < > ? \t \n \x0b \x0c \r'.split(' ') +if os.sep in ILLEGAL_CHARS: + ILLEGAL_CHARS.remove(os.sep) +ISWINDOWS = sys.platform == 'win32' +_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I) +def _check_path(path): + illegal = ILLEGAL_CHARS[:] + sp = path.strpath + if ISWINDOWS: + illegal.remove(':') + if not _reg_allow_disk.match(sp): + raise ValueError('path may not contain a colon (:)') + for char in sp: + if char not in string.printable or char in illegal: + raise ValueError('illegal character %r in path' % (char,)) + +def path_to_fspath(path, addat=True): + _check_path(path) + sp = path.strpath + if addat and path.rev != -1: + sp = '%s@%s' % (sp, path.rev) + elif addat: + sp = '%s at HEAD' % (sp,) + return sp + +def url_from_path(path): + fspath = path_to_fspath(path, False) + quote = py.std.urllib.quote + if ISWINDOWS: + match = _reg_allow_disk.match(fspath) + fspath = fspath.replace('\\', '/') + if match.group(1): + fspath = '/%s%s' % (match.group(1).replace('\\', '/'), + quote(fspath[len(match.group(1)):])) + else: + fspath = quote(fspath) + else: + fspath = quote(fspath) + if path.rev != -1: + fspath = '%s@%s' % (fspath, path.rev) + else: + fspath = '%s at HEAD' % (fspath,) + return 'file://%s' % (fspath,) + +class SvnAuth(object): + """ container for auth information for Subversion """ + def __init__(self, username, password, cache_auth=True, interactive=True): + self.username = username + self.password = password + self.cache_auth = cache_auth + self.interactive = interactive + + def makecmdoptions(self): + uname = self.username.replace('"', '\\"') + passwd = self.password.replace('"', '\\"') + ret = [] + if uname: + ret.append('--username="%s"' % (uname,)) + if passwd: + ret.append('--password="%s"' % (passwd,)) + if not self.cache_auth: + ret.append('--no-auth-cache') + if not 
self.interactive: + ret.append('--non-interactive') + return ' '.join(ret) + + def __str__(self): + return "" %(self.username,) + +rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)') + +class SvnWCCommandPath(common.PathBase): + """ path implementation offering access/modification to svn working copies. + It has methods similar to the functions in os.path and similar to the + commands of the svn client. + """ + sep = os.sep + + def __new__(cls, wcpath=None, auth=None): + self = object.__new__(cls) + if isinstance(wcpath, cls): + if wcpath.__class__ == cls: + return wcpath + wcpath = wcpath.localpath + if _check_for_bad_chars(str(wcpath), + ALLOWED_CHARS): + raise ValueError("bad char in wcpath %s" % (wcpath, )) + self.localpath = py.path.local(wcpath) + self.auth = auth + return self + + strpath = property(lambda x: str(x.localpath), None, None, "string path") + rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision") + + def __eq__(self, other): + return self.localpath == getattr(other, 'localpath', None) + + def _geturl(self): + if getattr(self, '_url', None) is None: + info = self.info() + self._url = info.url #SvnPath(info.url, info.rev) + assert isinstance(self._url, py.builtin._basestring) + return self._url + + url = property(_geturl, None, None, "url of this WC item") + + def _escape(self, cmd): + return _escape_helper(cmd) + + def dump(self, obj): + """ pickle object into path location""" + return self.localpath.dump(obj) + + def svnurl(self): + """ return current SvnPath for this WC-item. """ + info = self.info() + return py.path.svnurl(info.url) + + def __repr__(self): + return "svnwc(%r)" % (self.strpath) # , self._url) + + def __str__(self): + return str(self.localpath) + + def _makeauthoptions(self): + if self.auth is None: + return '' + return self.auth.makecmdoptions() + + def _authsvn(self, cmd, args=None): + args = args and list(args) or [] + args.append(self._makeauthoptions()) + return self._svn(cmd, *args) + + def _svn(self, cmd, *args): + l = ['svn %s' % cmd] + args = [self._escape(item) for item in args] + l.extend(args) + l.append('"%s"' % self._escape(self.strpath)) + # try fixing the locale because we can't otherwise parse + string = fixlocale() + " ".join(l) + try: + try: + key = 'LC_MESSAGES' + hold = os.environ.get(key) + os.environ[key] = 'C' + out = py.process.cmdexec(string) + finally: + if hold: + os.environ[key] = hold + else: + del os.environ[key] + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + strerr = e.err.lower() + if strerr.find('file not found') != -1: + raise py.error.ENOENT(self) + if (strerr.find('file exists') != -1 or + strerr.find('file already exists') != -1 or + strerr.find("can't create directory") != -1): + raise py.error.EEXIST(self) + raise + return out + + def switch(self, url): + """ switch to given URL. """ + self._authsvn('switch', [url]) + + def checkout(self, url=None, rev=None): + """ checkout from url to local wcpath. """ + args = [] + if url is None: + url = self.url + if rev is None or rev == -1: + if (py.std.sys.platform != 'win32' and + _getsvnversion() == '1.3'): + url += "@HEAD" + else: + if _getsvnversion() == '1.3': + url += "@%d" % rev + else: + args.append('-r' + str(rev)) + args.append(url) + self._authsvn('co', args) + + def update(self, rev='HEAD', interactive=True): + """ update working copy item to given revision. (None -> HEAD). 
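# Sketch of how an SvnAuth instance (defined above) ends up on the svn
# command line built by _svn()/_authsvn(); the username and password are
# invented for illustration.
auth = SvnAuth("guido", "s3cret", cache_auth=False, interactive=False)
opts = auth.makecmdoptions()
assert '--username="guido"' in opts
assert '--password="s3cret"' in opts
assert '--no-auth-cache' in opts
assert '--non-interactive' in opts
# a working copy path then carries the auth object into every svn call,
# e.g. (hypothetical path and URL):
#   wc = py.path.svnwc('/tmp/wc', auth=auth)
#   wc.checkout('http://codespeak.net/svn/py/dist')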
""" + opts = ['-r', rev] + if not interactive: + opts.append("--non-interactive") + self._authsvn('up', opts) + + def write(self, content, mode='w'): + """ write content into local filesystem wc. """ + self.localpath.write(content, mode) + + def dirpath(self, *args): + """ return the directory Path of the current Path. """ + return self.__class__(self.localpath.dirpath(*args), auth=self.auth) + + def _ensuredirs(self): + parent = self.dirpath() + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + self.mkdir() + return self + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'directory=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if p.check(): + if p.check(versioned=False): + p.add() + return p + if kwargs.get('dir', 0): + return p._ensuredirs() + parent = p.dirpath() + parent._ensuredirs() + p.write("") + p.add() + return p + + def mkdir(self, *args): + """ create & return the directory joined with args. """ + if args: + return self.join(*args).mkdir() + else: + self._svn('mkdir') + return self + + def add(self): + """ add ourself to svn """ + self._svn('add') + + def remove(self, rec=1, force=1): + """ remove a file or a directory tree. 'rec'ursive is + ignored and considered always true (because of + underlying svn semantics. + """ + assert rec, "svn cannot remove non-recursively" + if not self.check(versioned=True): + # not added to svn (anymore?), just remove + py.path.local(self).remove() + return + flags = [] + if force: + flags.append('--force') + self._svn('remove', *flags) + + def copy(self, target): + """ copy path to target.""" + py.process.cmdexec("svn copy %s %s" %(str(self), str(target))) + + def rename(self, target): + """ rename this path to target. """ + py.process.cmdexec("svn move --force %s %s" %(str(self), str(target))) + + def lock(self): + """ set a lock (exclusive) on the resource """ + out = self._authsvn('lock').strip() + if not out: + # warning or error, raise exception + raise Exception(out[4:]) + + def unlock(self): + """ unset a previously set lock """ + out = self._authsvn('unlock').strip() + if out.startswith('svn:'): + # warning or error, raise exception + raise Exception(out[4:]) + + def cleanup(self): + """ remove any locks from the resource """ + # XXX should be fixed properly!!! + try: + self.unlock() + except: + pass + + def status(self, updates=0, rec=0, externals=0): + """ return (collective) Status object for this file. """ + # http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1 + # 2201 2192 jum test + # XXX + if externals: + raise ValueError("XXX cannot perform status() " + "on external items yet") + else: + #1.2 supports: externals = '--ignore-externals' + externals = '' + if rec: + rec= '' + else: + rec = '--non-recursive' + + # XXX does not work on all subversion versions + #if not externals: + # externals = '--ignore-externals' + + if updates: + updates = '-u' + else: + updates = '' + + try: + cmd = 'status -v --xml --no-ignore %s %s %s' % ( + updates, rec, externals) + out = self._authsvn(cmd) + except py.process.cmdexec.Error: + cmd = 'status -v --no-ignore %s %s %s' % ( + updates, rec, externals) + out = self._authsvn(cmd) + rootstatus = WCStatus(self).fromstring(out, self) + else: + rootstatus = XMLWCStatus(self).fromstring(out, self) + return rootstatus + + def diff(self, rev=None): + """ return a diff of the current path against revision rev (defaulting + to the last one). 
+ """ + args = [] + if rev is not None: + args.append("-r %d" % rev) + out = self._authsvn('diff', args) + return out + + def blame(self): + """ return a list of tuples of three elements: + (revision, commiter, line) + """ + out = self._svn('blame') + result = [] + blamelines = out.splitlines() + reallines = py.path.svnurl(self.url).readlines() + for i, (blameline, line) in enumerate( + zip(blamelines, reallines)): + m = rex_blame.match(blameline) + if not m: + raise ValueError("output line %r of svn blame does not match " + "expected format" % (line, )) + rev, name, _ = m.groups() + result.append((int(rev), name, line)) + return result + + _rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL) + def commit(self, msg='', rec=1): + """ commit with support for non-recursive commits """ + # XXX i guess escaping should be done better here?!? + cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),) + if not rec: + cmd += ' -N' + out = self._authsvn(cmd) + try: + del cache.info[self] + except KeyError: + pass + if out: + m = self._rex_commit.match(out) + return int(m.group(1)) + + def propset(self, name, value, *args): + """ set property name to value on this path. """ + d = py.path.local.mkdtemp() + try: + p = d.join('value') + p.write(value) + self._svn('propset', name, '--file', str(p), *args) + finally: + d.remove() + + def propget(self, name): + """ get property name on this path. """ + res = self._svn('propget', name) + return res[:-1] # strip trailing newline + + def propdel(self, name): + """ delete property name on this path. """ + res = self._svn('propdel', name) + return res[:-1] # strip trailing newline + + def proplist(self, rec=0): + """ return a mapping of property names to property values. +If rec is True, then return a dictionary mapping sub-paths to such mappings. +""" + if rec: + res = self._svn('proplist -R') + return make_recursive_propdict(self, res) + else: + res = self._svn('proplist') + lines = res.split('\n') + lines = [x.strip() for x in lines[1:]] + return PropListDict(self, lines) + + def revert(self, rec=0): + """ revert the local changes of this path. if rec is True, do so +recursively. """ + if rec: + result = self._svn('revert -R') + else: + result = self._svn('revert') + return result + + def new(self, **kw): + """ create a modified version of this path. A 'rev' argument + indicates a new revision. + the following keyword arguments modify various path parts: + + http://host.com/repo/path/file.ext + |-----------------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + if kw: + localpath = self.localpath.new(**kw) + else: + localpath = self.localpath + return self.__class__(localpath, auth=self.auth) + + def join(self, *args, **kwargs): + """ return a new Path (with the same revision) which is composed + of the self Path followed by 'args' path components. + """ + if not args: + return self + localpath = self.localpath.join(*args, **kwargs) + return self.__class__(localpath, auth=self.auth) + + def info(self, usecache=1): + """ return an Info structure with svn-provided information. 
""" + info = usecache and cache.info.get(self) + if not info: + try: + output = self._svn('info') + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('Path is not a working copy directory') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find("is not under version control") != -1: + raise py.error.ENOENT(self, e.err) + raise + # XXX SVN 1.3 has output on stderr instead of stdout (while it does + # return 0!), so a bit nasty, but we assume no output is output + # to stderr... + if (output.strip() == '' or + output.lower().find('not a versioned resource') != -1): + raise py.error.ENOENT(self, output) + info = InfoSvnWCCommand(output) + + # Can't reliably compare on Windows without access to win32api + if py.std.sys.platform != 'win32': + if info.path != self.localpath: + raise py.error.ENOENT(self, "not a versioned resource:" + + " %s != %s" % (info.path, self.localpath)) + cache.info[self] = info + return info + + def listdir(self, fil=None, sort=None): + """ return a sequence of Paths. + + listdir will return either a tuple or a list of paths + depending on implementation choices. + """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + # XXX unify argument naming with LocalPath.listdir + def notsvn(path): + return path.basename != '.svn' + + paths = [] + for localpath in self.localpath.listdir(notsvn): + p = self.__class__(localpath, auth=self.auth) + if notsvn(p) and (not fil or fil(p)): + paths.append(p) + self._sortlist(paths, sort) + return paths + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + return open(self.strpath, mode) + + def _getbyspec(self, spec): + return self.localpath._getbyspec(spec) + + class Checkers(py.path.local.Checkers): + def __init__(self, path): + self.svnwcpath = path + self.path = path.localpath + def versioned(self): + try: + s = self.svnwcpath.info() + except (py.error.ENOENT, py.error.EEXIST): + return False + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('is not a working copy')!=-1: + return False + if e.err.lower().find('not a versioned resource') != -1: + return False + raise + else: + return True + + def log(self, rev_start=None, rev_end=1, verbose=False): + """ return a list of LogEntry instances for this path. +rev_start is the starting revision (defaulting to the first one). +rev_end is the last revision (defaulting to HEAD). +if verbose is True, then the LogEntry instances also know which files changed. 
+""" + assert self.check() # make it simpler for the pipe + rev_start = rev_start is None and "HEAD" or rev_start + rev_end = rev_end is None and "HEAD" or rev_end + if rev_start == "HEAD" and rev_end == 1: + rev_opt = "" + else: + rev_opt = "-r %s:%s" % (rev_start, rev_end) + verbose_opt = verbose and "-v" or "" + locale_env = fixlocale() + # some blather on stderr + auth_opt = self._makeauthoptions() + #stdin, stdout, stderr = os.popen3(locale_env + + # 'svn log --xml %s %s %s "%s"' % ( + # rev_opt, verbose_opt, auth_opt, + # self.strpath)) + cmd = locale_env + 'svn log --xml %s %s %s "%s"' % ( + rev_opt, verbose_opt, auth_opt, self.strpath) + + popen = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + ) + stdout, stderr = popen.communicate() + stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) + minidom,ExpatError = importxml() + try: + tree = minidom.parseString(stdout) + except ExpatError: + raise ValueError('no such revision') + result = [] + for logentry in filter(None, tree.firstChild.childNodes): + if logentry.nodeType == logentry.ELEMENT_NODE: + result.append(LogEntry(logentry)) + return result + + def size(self): + """ Return the size of the file content of the Path. """ + return self.info().size + + def mtime(self): + """ Return the last modification time of the file. """ + return self.info().mtime + + def __hash__(self): + return hash((self.strpath, self.__class__, self.auth)) + + +class WCStatus: + attrnames = ('modified','added', 'conflict', 'unchanged', 'external', + 'deleted', 'prop_modified', 'unknown', 'update_available', + 'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced' + ) + + def __init__(self, wcpath, rev=None, modrev=None, author=None): + self.wcpath = wcpath + self.rev = rev + self.modrev = modrev + self.author = author + + for name in self.attrnames: + setattr(self, name, []) + + def allpath(self, sort=True, **kw): + d = {} + for name in self.attrnames: + if name not in kw or kw[name]: + for path in getattr(self, name): + d[path] = 1 + l = d.keys() + if sort: + l.sort() + return l + + # XXX a bit scary to assume there's always 2 spaces between username and + # path, however with win32 allowing spaces in user names there doesn't + # seem to be a more solid approach :( + _rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)') + + def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): + """ return a new WCStatus object from data 's' + """ + rootstatus = WCStatus(rootwcpath, rev, modrev, author) + update_rev = None + for line in data.split('\n'): + if not line.strip(): + continue + #print "processing %r" % line + flags, rest = line[:8], line[8:] + # first column + c0,c1,c2,c3,c4,c5,x6,c7 = flags + #if '*' in line: + # print "flags", repr(flags), "rest", repr(rest) + + if c0 in '?XI': + fn = line.split(None, 1)[1] + if c0 == '?': + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.unknown.append(wcpath) + elif c0 == 'X': + wcpath = rootwcpath.__class__( + rootwcpath.localpath.join(fn, abs=1), + auth=rootwcpath.auth) + rootstatus.external.append(wcpath) + elif c0 == 'I': + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.ignored.append(wcpath) + + continue + + #elif c0 in '~!' 
or c4 == 'S': + # raise NotImplementedError("received flag %r" % c0) + + m = WCStatus._rex_status.match(rest) + if not m: + if c7 == '*': + fn = rest.strip() + wcpath = rootwcpath.join(fn, abs=1) + rootstatus.update_available.append(wcpath) + continue + if line.lower().find('against revision:')!=-1: + update_rev = int(rest.split(':')[1].strip()) + continue + if line.lower().find('status on external') > -1: + # XXX not sure what to do here... perhaps we want to + # store some state instead of just continuing, as right + # now it makes the top-level external get added twice + # (once as external, once as 'normal' unchanged item) + # because of the way SVN presents external items + continue + # keep trying + raise ValueError("could not parse line %r" % line) + else: + rev, modrev, author, fn = m.groups() + wcpath = rootwcpath.join(fn, abs=1) + #assert wcpath.check() + if c0 == 'M': + assert wcpath.check(file=1), "didn't expect a directory with changed content here" + rootstatus.modified.append(wcpath) + elif c0 == 'A' or c3 == '+' : + rootstatus.added.append(wcpath) + elif c0 == 'D': + rootstatus.deleted.append(wcpath) + elif c0 == 'C': + rootstatus.conflict.append(wcpath) + elif c0 == '~': + rootstatus.kindmismatch.append(wcpath) + elif c0 == '!': + rootstatus.incomplete.append(wcpath) + elif c0 == 'R': + rootstatus.replaced.append(wcpath) + elif not c0.strip(): + rootstatus.unchanged.append(wcpath) + else: + raise NotImplementedError("received flag %r" % c0) + + if c1 == 'M': + rootstatus.prop_modified.append(wcpath) + # XXX do we cover all client versions here? + if c2 == 'L' or c5 == 'K': + rootstatus.locked.append(wcpath) + if c7 == '*': + rootstatus.update_available.append(wcpath) + + if wcpath == rootwcpath: + rootstatus.rev = rev + rootstatus.modrev = modrev + rootstatus.author = author + if update_rev: + rootstatus.update_rev = update_rev + continue + return rootstatus + fromstring = staticmethod(fromstring) + +class XMLWCStatus(WCStatus): + def fromstring(data, rootwcpath, rev=None, modrev=None, author=None): + """ parse 'data' (XML string as outputted by svn st) into a status obj + """ + # XXX for externals, the path is shown twice: once + # with external information, and once with full info as if + # the item was a normal non-external... 
the current way of + # dealing with this issue is by ignoring it - this does make + # externals appear as external items as well as 'normal', + # unchanged ones in the status object so this is far from ideal + rootstatus = WCStatus(rootwcpath, rev, modrev, author) + update_rev = None + minidom, ExpatError = importxml() + try: + doc = minidom.parseString(data) + except ExpatError: + e = sys.exc_info()[1] + raise ValueError(str(e)) + urevels = doc.getElementsByTagName('against') + if urevels: + rootstatus.update_rev = urevels[-1].getAttribute('revision') + for entryel in doc.getElementsByTagName('entry'): + path = entryel.getAttribute('path') + statusel = entryel.getElementsByTagName('wc-status')[0] + itemstatus = statusel.getAttribute('item') + + if itemstatus == 'unversioned': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.unknown.append(wcpath) + continue + elif itemstatus == 'external': + wcpath = rootwcpath.__class__( + rootwcpath.localpath.join(path, abs=1), + auth=rootwcpath.auth) + rootstatus.external.append(wcpath) + continue + elif itemstatus == 'ignored': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.ignored.append(wcpath) + continue + elif itemstatus == 'incomplete': + wcpath = rootwcpath.join(path, abs=1) + rootstatus.incomplete.append(wcpath) + continue + + rev = statusel.getAttribute('revision') + if itemstatus == 'added' or itemstatus == 'none': + rev = '0' + modrev = '?' + author = '?' + date = '' + else: + #print entryel.toxml() + commitel = entryel.getElementsByTagName('commit')[0] + if commitel: + modrev = commitel.getAttribute('revision') + author = '' + author_els = commitel.getElementsByTagName('author') + if author_els: + for c in author_els[0].childNodes: + author += c.nodeValue + date = '' + for c in commitel.getElementsByTagName('date')[0]\ + .childNodes: + date += c.nodeValue + + wcpath = rootwcpath.join(path, abs=1) + + assert itemstatus != 'modified' or wcpath.check(file=1), ( + 'did\'t expect a directory with changed content here') + + itemattrname = { + 'normal': 'unchanged', + 'unversioned': 'unknown', + 'conflicted': 'conflict', + 'none': 'added', + }.get(itemstatus, itemstatus) + + attr = getattr(rootstatus, itemattrname) + attr.append(wcpath) + + propsstatus = statusel.getAttribute('props') + if propsstatus not in ('none', 'normal'): + rootstatus.prop_modified.append(wcpath) + + if wcpath == rootwcpath: + rootstatus.rev = rev + rootstatus.modrev = modrev + rootstatus.author = author + rootstatus.date = date + + # handle repos-status element (remote info) + rstatusels = entryel.getElementsByTagName('repos-status') + if rstatusels: + rstatusel = rstatusels[0] + ritemstatus = rstatusel.getAttribute('item') + if ritemstatus in ('added', 'modified'): + rootstatus.update_available.append(wcpath) + + lockels = entryel.getElementsByTagName('lock') + if len(lockels): + rootstatus.locked.append(wcpath) + + return rootstatus + fromstring = staticmethod(fromstring) + +class InfoSvnWCCommand: + def __init__(self, output): + # Path: test + # URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test + # Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada + # Revision: 2151 + # Node Kind: directory + # Schedule: normal + # Last Changed Author: hpk + # Last Changed Rev: 2100 + # Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) + # Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003) + + d = {} + for line in output.split('\n'): + if not line.strip(): + continue + key, value = line.split(':', 1) + key = 
key.lower().replace(' ', '') + value = value.strip() + d[key] = value + try: + self.url = d['url'] + except KeyError: + raise ValueError("Not a versioned resource") + #raise ValueError, "Not a versioned resource %r" % path + self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind'] + self.rev = int(d['revision']) + self.path = py.path.local(d['path']) + self.size = self.path.size() + if 'lastchangedrev' in d: + self.created_rev = int(d['lastchangedrev']) + if 'lastchangedauthor' in d: + self.last_author = d['lastchangedauthor'] + if 'lastchangeddate' in d: + self.mtime = parse_wcinfotime(d['lastchangeddate']) + self.time = self.mtime * 1000000 + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + +def parse_wcinfotime(timestr): + """ Returns seconds since epoch, UTC. """ + # example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003) + m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr) + if not m: + raise ValueError("timestring %r does not match" % timestr) + timestr, timezone = m.groups() + # do not handle timezone specially, return value should be UTC + parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S") + return calendar.timegm(parsedtime) + +def make_recursive_propdict(wcroot, + output, + rex = re.compile("Properties on '(.*)':")): + """ Return a dictionary of path->PropListDict mappings. """ + lines = [x for x in output.split('\n') if x] + pdict = {} + while lines: + line = lines.pop(0) + m = rex.match(line) + if not m: + raise ValueError("could not parse propget-line: %r" % line) + path = m.groups()[0] + wcpath = wcroot.join(path, abs=1) + propnames = [] + while lines and lines[0].startswith(' '): + propname = lines.pop(0).strip() + propnames.append(propname) + assert propnames, "must have found properties!" 
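# Standalone check of parse_wcinfotime() above; the input is the format
# 'svn info' prints for "Last Changed Date" and the result is seconds since
# the epoch in UTC (the +0100 offset is deliberately left unapplied):
#
#   secs = parse_wcinfotime('2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)')
#   assert time.gmtime(secs)[:6] == (2003, 10, 27, 20, 43, 14)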
+ pdict[wcpath] = PropListDict(wcpath, propnames) + return pdict + + +def importxml(cache=[]): + if cache: + return cache + from xml.dom import minidom + from xml.parsers.expat import ExpatError + cache.extend([minidom, ExpatError]) + return cache + +class LogEntry: + def __init__(self, logentry): + self.rev = int(logentry.getAttribute('revision')) + for lpart in filter(None, logentry.childNodes): + if lpart.nodeType == lpart.ELEMENT_NODE: + if lpart.nodeName == 'author': + self.author = lpart.firstChild.nodeValue + elif lpart.nodeName == 'msg': + if lpart.firstChild: + self.msg = lpart.firstChild.nodeValue + else: + self.msg = '' + elif lpart.nodeName == 'date': + #2003-07-29T20:05:11.598637Z + timestr = lpart.firstChild.nodeValue + self.date = parse_apr_time(timestr) + elif lpart.nodeName == 'paths': + self.strpaths = [] + for ppart in filter(None, lpart.childNodes): + if ppart.nodeType == ppart.ELEMENT_NODE: + self.strpaths.append(PathEntry(ppart)) + def __repr__(self): + return '' % ( + self.rev, self.author, self.date) + + --- /dev/null +++ b/py/_log/__init__.py @@ -0,0 +1,2 @@ +""" logging API ('producers' and 'consumers' connected via keywords) """ + --- /dev/null +++ b/py/_io/__init__.py @@ -0,0 +1,1 @@ +""" input/output helping """ --- /dev/null +++ b/py/_io/capture.py @@ -0,0 +1,344 @@ +import os +import sys +import py +import tempfile + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +if sys.version_info < (3,0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + data = unicode(data, getattr(self, '_encoding', 'UTF-8')) + StringIO.write(self, data) +else: + TextIO = StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" %(data,)) + StringIO.write(self, data) + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. + """ + self.targetfd = targetfd + if tmpfile is None: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(targetfd) + os.dup2(self.tmpfile.fileno(), targetfd) + self._patched = [] + + def setasfile(self, name, module=sys): + """ patch . 
to self.tmpfile + """ + key = (module, name) + self._patched.append((key, getattr(module, name))) + setattr(module, name, self.tmpfile) + + def unsetfiles(self): + """ unpatch all patched items + """ + while self._patched: + (module, name), value = self._patched.pop() + setattr(module, name, value) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + self.unsetfiles() + os.close(self._savefd) + self.tmpfile.seek(0) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + mode = mode and mode or f.mode + if sys.version_info >= (3,0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = True + return os.fdopen(newfd, mode, buffering, encoding, closefd=False) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + elif isinstance(obj, str): + pass + else: + obj = str(obj) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + +class Capture(object): + def call(cls, func, *args, **kwargs): + """ return a (res, out, err) tuple where + out and err represent the output/error output + during function execution. + call the given function with args/kwargs + and capture output/error during its execution. + """ + so = cls() + try: + res = func(*args, **kwargs) + finally: + out, err = so.reset() + return res, out, err + call = classmethod(call) + + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_suspended'): + outfile = self._kwargs['out'] + errfile = self._kwargs['err'] + del self._kwargs + else: + outfile, errfile = self.done() + out, err = "", "" + if outfile: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + assert not hasattr(self, '_suspended') + self._suspended = True + outerr = self.readouterr() + outfile, errfile = self.done() + self._kwargs['out'] = outfile + self._kwargs['err'] = errfile + return outerr + + def resume(self): + """ resume capturing with original temp files. 
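# Sketch of the Capture.call() helper defined above, driven through the
# in-memory StdCapture subclass further down (assumed to be exported as
# py.io.StdCapture, just as FDCapture is referenced as py.io.FDCapture
# elsewhere in this file):
import py

def _hello(name):
    print("hello %s" % name)
    return 42

res, out, err = py.io.StdCapture.call(_hello, "world")
assert res == 42
assert out == "hello world\n"
assert err == ""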
""" + assert self._suspended + self._initialize(**self._kwargs) + del self._suspended + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin) + """ + def __init__(self, out=True, err=True, + mixed=False, in_=True, patchsys=True): + self._kwargs = locals().copy() + del self._kwargs['self'] + self._initialize(**self._kwargs) + + def _initialize(self, out=True, err=True, + mixed=False, in_=True, patchsys=True): + if in_: + self._oldin = (sys.stdin, os.dup(0)) + sys.stdin = DontReadFromInput() + fd = os.open(devnullpath, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + self.out = py.io.FDCapture(1, tmpfile=tmpfile) + if patchsys: + self.out.setasfile('stdout') + if err: + if mixed and out: + tmpfile = self.out.tmpfile + elif hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + self.err = py.io.FDCapture(2, tmpfile=tmpfile) + if patchsys: + self.err.setasfile('stderr') + + def done(self): + """ return (outfile, errfile) and stop capturing. """ + if hasattr(self, 'out'): + outfile = self.out.done() + else: + outfile = None + if hasattr(self, 'err'): + errfile = self.err.done() + else: + errfile = None + if hasattr(self, '_oldin'): + oldsys, oldfd = self._oldin + os.dup2(oldfd, 0) + os.close(oldfd) + sys.stdin = oldsys + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + l = [] + for name in ('out', 'err'): + res = "" + if hasattr(self, name): + f = getattr(self, name).tmpfile + f.seek(0) + res = f.read() + f.truncate(0) + f.seek(0) + l.append(res) + return l + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True, mixed=False): + self._kwargs = locals().copy() + del self._kwargs['self'] + self._initialize(**self._kwargs) + + def _initialize(self, out, err, in_, mixed): + self._out = out + self._err = err + self._in = in_ + if out: + self._oldout = sys.stdout + if not hasattr(out, 'write'): + out = TextIO() + sys.stdout = self.out = out + if err: + self._olderr = sys.stderr + if out and mixed: + err = self.out + elif not hasattr(err, 'write'): + err = TextIO() + sys.stderr = self.err = err + if in_: + self._oldin = sys.stdin + sys.stdin = self.newin = DontReadFromInput() + + def done(self): + """ return (outfile, errfile) and stop capturing. """ + o,e = sys.stdout, sys.stderr + if self._out: + try: + sys.stdout = self._oldout + except AttributeError: + raise IOError("stdout capturing already reset") + del self._oldout + outfile = self.out + outfile.seek(0) + else: + outfile = None + if self._err: + try: + sys.stderr = self._olderr + except AttributeError: + raise IOError("stderr capturing already reset") + del self._olderr + errfile = self.err + errfile.seek(0) + else: + errfile = None + if self._in: + sys.stdin = self._oldin + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = err = "" + if self._out: + out = sys.stdout.getvalue() + sys.stdout.truncate(0) + if self._err: + err = sys.stderr.getvalue() + sys.stderr.truncate(0) + return out, err + +class DontReadFromInput: + """Temporary stub class. Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + def isatty(self): + return False + +try: + devnullpath = os.devnull +except AttributeError: + if os.name == 'nt': + devnullpath = 'NUL' + else: + devnullpath = '/dev/null' + + --- /dev/null +++ b/py/_plugin/hookspec.py @@ -0,0 +1,179 @@ +""" +hook specifications for py.test plugins +""" + +# ------------------------------------------------------------------------- +# Command line and configuration +# ------------------------------------------------------------------------- + +def pytest_addoption(parser): + """ called before commandline parsing. """ + +def pytest_namespace(): + """ return dict of name->object which will get stored at py.test. namespace""" + +def pytest_configure(config): + """ called after command line options have been parsed. + and all plugins and initial conftest files been loaded. + """ + +def pytest_unconfigure(config): + """ called before test process is exited. """ + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + +def pytest_collect_directory(path, parent): + """ return Collection node or None for the given path. """ +pytest_collect_directory.firstresult = True + +def pytest_collect_file(path, parent): + """ return Collection node or None for the given path. """ + +def pytest_collectstart(collector): + """ collector starts collecting. """ + +def pytest_collectreport(report): + """ collector finished collecting. """ + +def pytest_deselected(items): + """ called for test items deselected by keyword. """ + +def pytest_make_collect_report(collector): + """ perform a collection and return a collection. """ +pytest_make_collect_report.firstresult = True + +# XXX rename to item_collected()? meaning in distribution context? +def pytest_itemstart(item, node=None): + """ test item gets collected. """ + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + +def pytest_pycollect_makeitem(collector, name, obj): + """ return custom item/collector for a python object in a module, or None. """ +pytest_pycollect_makeitem.firstresult = True + +def pytest_pyfunc_call(pyfuncitem): + """ call underlying test function. """ +pytest_pyfunc_call.firstresult = True + +def pytest_generate_tests(metafunc): + """ generate (multiple) parametrized calls to a test function.""" + +# ------------------------------------------------------------------------- +# generic runtest related hooks +# ------------------------------------------------------------------------- + +def pytest_runtest_protocol(item): + """ implement fixture, run and report about the given test item. 
""" +pytest_runtest_protocol.firstresult = True + +def pytest_runtest_setup(item): + """ called before pytest_runtest_call(). """ + +def pytest_runtest_call(item): + """ execute test item. """ + +def pytest_runtest_teardown(item): + """ called after pytest_runtest_call(). """ + +def pytest_runtest_makereport(item, call): + """ make a test report for the given item and call outcome. """ +pytest_runtest_makereport.firstresult = True + +def pytest_runtest_logreport(report): + """ process item test report. """ + +# special handling for final teardown - somewhat internal for now +def pytest__teardown_final(session): + """ called before test session finishes. """ +pytest__teardown_final.firstresult = True + +def pytest__teardown_final_logerror(report): + """ called if runtest_teardown_final failed. """ + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + +def pytest_sessionstart(session): + """ before session.main() is called. """ + +def pytest_sessionfinish(session, exitstatus): + """ whole test run finishes. """ + +# ------------------------------------------------------------------------- +# hooks for influencing reporting (invoked from pytest_terminal) +# ------------------------------------------------------------------------- + +def pytest_report_header(config): + """ return a string to be displayed as header info for terminal reporting.""" + +def pytest_report_teststatus(report): + """ return result-category, shortletter and verbose word for reporting.""" +pytest_report_teststatus.firstresult = True + +def pytest_terminal_summary(terminalreporter): + """ add additional section in terminal summary reporting. """ + +def pytest_report_iteminfo(item): + """ return (fspath, lineno, name) for the item. + the information is used for result display and to sort tests + """ +pytest_report_iteminfo.firstresult = True + +# ------------------------------------------------------------------------- +# doctest hooks +# ------------------------------------------------------------------------- + +def pytest_doctest_prepare_content(content): + """ return processed content for a given doctest""" +pytest_doctest_prepare_content.firstresult = True + +# ------------------------------------------------------------------------- +# distributed testing +# ------------------------------------------------------------------------- + +def pytest_gwmanage_newgateway(gateway, platinfo): + """ called on new raw gateway creation. """ + +def pytest_gwmanage_rsyncstart(source, gateways): + """ called before rsyncing a directory to remote gateways takes place. """ + +def pytest_gwmanage_rsyncfinish(source, gateways): + """ called after rsyncing a directory to remote gateways takes place. """ + +def pytest_testnodeready(node): + """ Test Node is ready to operate. """ + +def pytest_testnodedown(node, error): + """ Test Node is down. """ + +def pytest_rescheduleitems(items): + """ reschedule Items from a node that went down. """ + +def pytest_looponfailinfo(failreports, rootdirs): + """ info for repeating failing tests. """ + + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + +def pytest_plugin_registered(plugin, manager): + """ a new py lib plugin got registered. 
""" + +def pytest_plugin_unregistered(plugin): + """ a py lib plugin got unregistered. """ + +def pytest_internalerror(excrepr): + """ called for internal errors. """ + +def pytest_keyboard_interrupt(excinfo): + """ called for keyboard interrupt. """ + +def pytest_trace(category, msg): + """ called for debug info. """ --- /dev/null +++ b/py/_plugin/pytest_default.py @@ -0,0 +1,107 @@ +""" default hooks and general py.test options. """ + +import sys +import py + +def pytest_pyfunc_call(__multicall__, pyfuncitem): + if not __multicall__.execute(): + testfunction = pyfuncitem.obj + if pyfuncitem._isyieldedfunction(): + testfunction(*pyfuncitem._args) + else: + funcargs = pyfuncitem.funcargs + testfunction(**funcargs) + +def pytest_collect_file(path, parent): + ext = path.ext + pb = path.purebasename + if pb.startswith("test_") or pb.endswith("_test") or \ + path in parent.config._argfspaths: + if ext == ".py": + return parent.Module(path, parent=parent) + +def pytest_funcarg__pytestconfig(request): + """ the pytest config object with access to command line opts.""" + return request.config + +def pytest_collect_directory(path, parent): + # XXX reconsider the following comment + # not use parent.Directory here as we generally + # want dir/conftest.py to be able to + # define Directory(dir) already + if not parent.recfilter(path): # by default special ".cvs", ... + # check if cmdline specified this dir or a subdir directly + for arg in parent.config._argfspaths: + if path == arg or arg.relto(path): + break + else: + return + Directory = parent.config._getcollectclass('Directory', path) + return Directory(path, parent=parent) + +def pytest_report_iteminfo(item): + return item.reportinfo() + +def pytest_addoption(parser): + group = parser.getgroup("general", "running and selection options") + group._addoption('-x', '--exitfirst', + action="store_true", dest="exitfirst", default=False, + help="exit instantly on first error or failed test."), + group._addoption('-k', + action="store", dest="keyword", default='', + help="only run test items matching the given " + "space separated keywords. precede a keyword with '-' to negate. " + "Terminate the expression with ':' to treat a match as a signal " + "to run all subsequent tests. 
") + + group = parser.getgroup("collect", "collection") + group.addoption('--collectonly', + action="store_true", dest="collectonly", + help="only collect tests, don't execute them."), + group.addoption("--ignore", action="append", metavar="path", + help="ignore path during collection (multi-allowed).") + group.addoption('--confcutdir', dest="confcutdir", default=None, + metavar="dir", + help="only load conftest.py's relative to specified dir.") + + group = parser.getgroup("debugconfig", + "test process debugging and configuration") + group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", + help="base temporary directory for this test run.") + +def pytest_configure(config): + setsession(config) + +def setsession(config): + val = config.getvalue + if val("collectonly"): + from py._test.session import Session + config.setsessionclass(Session) + +# pycollect related hooks and code, should move to pytest_pycollect.py + +def pytest_pycollect_makeitem(__multicall__, collector, name, obj): + res = __multicall__.execute() + if res is not None: + return res + if collector._istestclasscandidate(name, obj): + res = collector._deprecated_join(name) + if res is not None: + return res + return collector.Class(name, parent=collector) + elif collector.funcnamefilter(name) and hasattr(obj, '__call__'): + res = collector._deprecated_join(name) + if res is not None: + return res + if is_generator(obj): + # XXX deprecation warning + return collector.Generator(name, parent=collector) + else: + return collector._genfunctions(name, obj) + +def is_generator(func): + try: + return py.code.getrawcode(func).co_flags & 32 # generator function + except AttributeError: # builtin functions have no bytecode + # assume them to not be generators + return False --- /dev/null +++ b/py/_code/oldmagic2.py @@ -0,0 +1,6 @@ + +import py + +py.log._apiwarn("1.1", "py.magic.AssertionError is deprecated, use py.code._AssertionError", stacklevel=2) + +from py.code import _AssertionError as AssertionError --- /dev/null +++ b/py/_cmdline/pycleanup.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python + +"""\ +py.cleanup [PATH] + +Delete pyc file recursively, starting from PATH (which defaults to the current +working directory). Don't follow links and don't recurse into directories with +a ".". +""" +import py + +def main(): + parser = py.std.optparse.OptionParser(usage=__doc__) + parser.add_option("-e", "--remove", dest="ext", default=".pyc", action="store", + help="remove files with the given comma-separated list of extensions" + ) + parser.add_option("-n", "--dryrun", dest="dryrun", default=False, + action="store_true", + help="display would-be-removed filenames" + ) + parser.add_option("-d", action="store_true", dest="removedir", + help="remove empty directories") + (options, args) = parser.parse_args() + if not args: + args = ["."] + ext = options.ext.split(",") + def shouldremove(p): + return p.ext in ext + + for arg in args: + path = py.path.local(arg) + py.builtin.print_("cleaning path", path, "of extensions", ext) + for x in path.visit(shouldremove, lambda x: x.check(dotfile=0, link=0)): + remove(x, options) + if options.removedir: + for x in path.visit(lambda x: x.check(dir=1), + lambda x: x.check(dotfile=0, link=0)): + if not x.listdir(): + remove(x, options) + +def remove(path, options): + if options.dryrun: + py.builtin.print_("would remove", path) + else: + py.builtin.print_("removing", path) + path.remove() + --- /dev/null +++ b/py/_cmdline/pysvnwcrevert.py @@ -0,0 +1,55 @@ +#! 
/usr/bin/env python +"""\ +py.svnwcrevert [options] WCPATH + +Running this script and then 'svn up' puts the working copy WCPATH in a state +as clean as a fresh check-out. + +WARNING: you'll loose all local changes, obviously! + +This script deletes all files that have been modified +or that svn doesn't explicitly know about, including svn:ignored files +(like .pyc files, hint hint). + +The goal of this script is to leave the working copy with some files and +directories possibly missing, but - most importantly - in a state where +the following 'svn up' won't just crash. +""" + +import sys, py + +def kill(p, root): + print('< %s' % (p.relto(root),)) + p.remove(rec=1) + +def svnwcrevert(path, root=None, precious=[]): + if root is None: + root = path + wcpath = py.path.svnwc(path) + try: + st = wcpath.status() + except ValueError: # typically, "bad char in wcpath" + kill(path, root) + return + for p in path.listdir(): + if p.basename == '.svn' or p.basename in precious: + continue + wcp = py.path.svnwc(p) + if wcp not in st.unchanged and wcp not in st.external: + kill(p, root) + elif p.check(dir=1): + svnwcrevert(p, root) + +# XXX add a functional test + +parser = py.std.optparse.OptionParser(usage=__doc__) +parser.add_option("-p", "--precious", + action="append", dest="precious", default=[], + help="preserve files with this name") + +def main(): + opts, args = parser.parse_args() + if len(args) != 1: + parser.print_help() + sys.exit(2) + svnwcrevert(py.path.local(args[0]), precious=opts.precious) --- /dev/null +++ b/py/_plugin/pytest_genscript.py @@ -0,0 +1,67 @@ +#! /usr/bin/env python +""" +generate standalone test script to be distributed along with an application. +""" + +import os +import zlib +import base64 +import sys +try: + import pickle +except Importerror: + import cPickle as pickle + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group.addoption("--genscript", action="store", default=None, + dest="genscript", metavar="path", + help="create standalone py.test script at given target path.") + +def pytest_configure(config): + genscript = config.getvalue("genscript") + if genscript: + import py + mydir = py.path.local(__file__).dirpath() + infile = mydir.join("standalonetemplate.py") + pybasedir = py.path.local(py.__file__).dirpath().dirpath() + main(pybasedir, outfile=genscript, infile=infile) + raise SystemExit(0) + +def main(pybasedir, outfile, infile): + os.chdir(str(pybasedir)) + outfile = str(outfile) + infile = str(infile) + files = [] + for dirpath, dirnames, filenames in os.walk("py"): + for f in filenames: + if not f.endswith(".py"): + continue + + fn = os.path.join(dirpath, f) + files.append(fn) + + name2src = {} + for f in files: + k = f.replace(os.sep, ".")[:-3] + name2src[k] = open(f, "r").read() + + data = pickle.dumps(name2src, 2) + data = zlib.compress(data, 9) + data = base64.encodestring(data) + data = data.decode("ascii") + + exe = open(infile, "r").read() + exe = exe.replace("@SOURCES@", data) + + open(outfile, "w").write(exe) + os.chmod(outfile, 493) # 0755 + sys.stdout.write("generated standalone py.test at %r, have fun!\n" % outfile) + +if __name__=="__main__": + dn = os.path.dirname + here = os.path.abspath(dn(__file__)) # py/plugin/ + pybasedir = dn(dn(here)) + outfile = os.path.join(os.getcwd(), "py.test-standalone") + infile = os.path.join(here, 'standalonetemplate.py') + main(pybasedir, outfile, infile) --- /dev/null +++ b/py/_path/gateway/remotepath.py @@ -0,0 +1,47 @@ +import py, itertools +from py._path import common + 
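# The standalone script produced by pytest_genscript's main() above has to
# reverse that encoding chain when it runs; a minimal sketch of the round
# trip (the module sources dict is made up):
import base64, pickle, zlib

name2src = {"py.fakemod": "x = 1\n"}
blob = base64.encodestring(zlib.compress(pickle.dumps(name2src, 2), 9))
assert pickle.loads(zlib.decompress(base64.decodestring(blob))) == name2src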
+COUNTER = itertools.count() + +class RemotePath(common.PathBase): + sep = '/' + + def __init__(self, channel, id, basename=None): + self._channel = channel + self._id = id + self._basename = basename + self._specs = {} + + def __del__(self): + self._channel.send(('DEL', self._id)) + + def __repr__(self): + return 'RemotePath(%s)' % self.basename + + def listdir(self, *args): + self._channel.send(('LIST', self._id) + args) + return [RemotePath(self._channel, id, basename) + for (id, basename) in self._channel.receive()] + + def dirpath(self): + id = ~COUNTER.next() + self._channel.send(('DIRPATH', self._id, id)) + return RemotePath(self._channel, id) + + def join(self, *args): + id = ~COUNTER.next() + self._channel.send(('JOIN', self._id, id) + args) + return RemotePath(self._channel, id) + + def _getbyspec(self, spec): + parts = spec.split(',') + ask = [x for x in parts if x not in self._specs] + if ask: + self._channel.send(('GET', self._id, ",".join(ask))) + for part, value in zip(ask, self._channel.receive()): + self._specs[part] = value + return [self._specs[x] for x in parts] + + def read(self): + self._channel.send(('READ', self._id)) + return self._channel.receive() --- /dev/null +++ b/py/_plugin/pytest_doctest.py @@ -0,0 +1,100 @@ +""" +collect and execute doctests from modules and test files. + +Usage +------------- + +By default all files matching the ``test*.txt`` pattern will +be run through the python standard ``doctest`` module. Issue:: + + py.test --doctest-glob='*.rst' + +to change the pattern. Additionally you can trigger running of +tests in all python modules (including regular python test modules):: + + py.test --doctest-modules + +You can also make these changes permanent in your project by +putting them into a conftest.py file like this:: + + # content of conftest.py + option_doctestmodules = True + option_doctestglob = "*.rst" +""" + +import py +from py._code.code import TerminalRepr, ReprFileLocation +import doctest + +def pytest_addoption(parser): + group = parser.getgroup("collect") + group.addoption("--doctest-modules", + action="store_true", default=False, + help="run doctests in all .py modules", + dest="doctestmodules") + group.addoption("--doctest-glob", + action="store", default="test*.txt", metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob") + +def pytest_collect_file(path, parent): + config = parent.config + if path.ext == ".py": + if config.getvalue("doctestmodules"): + return DoctestModule(path, parent) + elif path.check(fnmatch=config.getvalue("doctestglob")): + return DoctestTextfile(path, parent) + +class ReprFailDoctest(TerminalRepr): + def __init__(self, reprlocation, lines): + self.reprlocation = reprlocation + self.lines = lines + def toterminal(self, tw): + for line in self.lines: + tw.line(line) + self.reprlocation.toterminal(tw) + +class DoctestItem(py.test.collect.Item): + def __init__(self, path, parent): + name = self.__class__.__name__ + ":" + path.basename + super(DoctestItem, self).__init__(name=name, parent=parent) + self.fspath = path + + def repr_failure(self, excinfo): + if excinfo.errisinstance(doctest.DocTestFailure): + doctestfailure = excinfo.value + example = doctestfailure.example + test = doctestfailure.test + filename = test.filename + lineno = test.lineno + example.lineno + 1 + message = excinfo.type.__name__ + reprlocation = ReprFileLocation(filename, lineno, message) + checker = doctest.OutputChecker() + REPORT_UDIFF = doctest.REPORT_UDIFF + filelines = 
py.path.local(filename).readlines(cr=0) + i = max(test.lineno, max(0, lineno - 10)) # XXX? + lines = [] + for line in filelines[i:lineno]: + lines.append("%03d %s" % (i+1, line)) + i += 1 + lines += checker.output_difference(example, + doctestfailure.got, REPORT_UDIFF).split("\n") + return ReprFailDoctest(reprlocation, lines) + elif excinfo.errisinstance(doctest.UnexpectedException): + excinfo = py.code.ExceptionInfo(excinfo.value.exc_info) + return super(DoctestItem, self).repr_failure(excinfo) + else: + return super(DoctestItem, self).repr_failure(excinfo) + +class DoctestTextfile(DoctestItem): + def runtest(self): + if not self._deprecated_testexecution(): + failed, tot = doctest.testfile( + str(self.fspath), module_relative=False, + raise_on_error=True, verbose=0) + +class DoctestModule(DoctestItem): + def runtest(self): + module = self.fspath.pyimport() + failed, tot = doctest.testmod( + module, raise_on_error=True, verbose=0) --- /dev/null +++ b/py/_plugin/pytest_assertion.py @@ -0,0 +1,31 @@ +import py +import sys + +def pytest_addoption(parser): + group = parser.getgroup("debugconfig") + group._addoption('--no-assert', action="store_true", default=False, + dest="noassert", + help="disable python assert expression reinterpretation."), + +def pytest_configure(config): + #if sys.platform.startswith("java"): + # return # XXX assertions don't work yet with jython 2.5.1 + + if not config.getvalue("noassert") and not config.getvalue("nomagic"): + warn_about_missing_assertion() + config._oldassertion = py.builtin.builtins.AssertionError + py.builtin.builtins.AssertionError = py.code._AssertionError + +def pytest_unconfigure(config): + if hasattr(config, '_oldassertion'): + py.builtin.builtins.AssertionError = config._oldassertion + del config._oldassertion + +def warn_about_missing_assertion(): + try: + assert False + except AssertionError: + pass + else: + py.std.warnings.warn("Assertions are turned off!" + " (are you using python -O?)") --- /dev/null +++ b/py/_code/source.py @@ -0,0 +1,347 @@ +from __future__ import generators +import sys +import inspect, tokenize +import py +cpy_compile = compile + +try: + import _ast + from _ast import PyCF_ONLY_AST as _AST_FLAG +except ImportError: + _AST_FLAG = 0 + _ast = None + + +class Source(object): + """ a immutable object holding a source code fragment, + possibly deindenting it. 
+ """ + def __init__(self, *parts, **kwargs): + self.lines = lines = [] + de = kwargs.get('deindent', True) + rstrip = kwargs.get('rstrip', True) + for part in parts: + if not part: + partlines = [] + if isinstance(part, Source): + partlines = part.lines + elif isinstance(part, py.builtin._basestring): + partlines = part.split('\n') + if rstrip: + while partlines: + if partlines[-1].strip(): + break + partlines.pop() + else: + partlines = getsource(part, deindent=de).lines + if de: + partlines = deindent(partlines) + lines.extend(partlines) + + def __eq__(self, other): + try: + return self.lines == other.lines + except AttributeError: + if isinstance(other, str): + return str(self) == other + return False + + def __getitem__(self, key): + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + return self.__getslice__(key.start, key.stop) + + def __len__(self): + return len(self.lines) + + def __getslice__(self, start, end): + newsource = Source() + newsource.lines = self.lines[start:end] + return newsource + + def strip(self): + """ return new source object with trailing + and leading blank lines removed. + """ + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end-1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def putaround(self, before='', after='', indent=' ' * 4): + """ return a copy of the source object with + 'before' and 'after' wrapped around it. + """ + before = Source(before) + after = Source(after) + newsource = Source() + lines = [ (indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines + return newsource + + def indent(self, indent=' ' * 4): + """ return a copy of the source object with + all lines indented by the given indent-string. + """ + newsource = Source() + newsource.lines = [(indent+line) for line in self.lines] + return newsource + + def getstatement(self, lineno): + """ return Source statement which contains the + given linenumber (counted from 0). + """ + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno): + """ return (start, end) tuple which spans the minimal + statement region which containing the given lineno. + """ + # XXX there must be a better than these heuristic ways ... + # XXX there may even be better heuristics :-) + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + + # 1. find the start of the statement + from codeop import compile_command + for start in range(lineno, -1, -1): + trylines = self.lines[start:lineno+1] + # quick hack to indent the source and get it as a string in one go + trylines.insert(0, 'def xxx():') + trysource = '\n '.join(trylines) + # ^ space here + try: + compile_command(trysource) + except (SyntaxError, OverflowError, ValueError): + pass + else: + break # got a valid or incomplete statement + + # 2. find the end of the statement + for end in range(lineno+1, len(self)+1): + trysource = self[start:end] + if trysource.isparseable(): + break + + return start, end + + def getblockend(self, lineno): + # XXX + lines = [x + '\n' for x in self.lines[lineno:]] + blocklines = inspect.getblock(lines) + #print blocklines + return lineno + len(blocklines) - 1 + + def deindent(self, offset=None): + """ return a new source object deindented by offset. 
+            If offset is None then guess an indentation offset from
+            the first non-blank line.  Subsequent lines which have a
+            lower indentation offset will be copied verbatim as
+            they are assumed to be part of multilines.
+        """
+        # XXX maybe use the tokenizer to properly handle multiline
+        #     strings etc.pp?
+        newsource = Source()
+        newsource.lines[:] = deindent(self.lines, offset)
+        return newsource
+
+    def isparseable(self, deindent=True):
+        """ return True if source is parseable, heuristically
+            deindenting it by default.
+        """
+        try:
+            import parser
+        except ImportError:
+            syntax_checker = lambda x: compile(x, 'asd', 'exec')
+        else:
+            syntax_checker = parser.suite
+
+        if deindent:
+            source = str(self.deindent())
+        else:
+            source = str(self)
+        try:
+            #compile(source+'\n', "x", "exec")
+            syntax_checker(source+'\n')
+        except SyntaxError:
+            return False
+        else:
+            return True
+
+    def __str__(self):
+        return "\n".join(self.lines)
+
+    def compile(self, filename=None, mode='exec',
+                flag=generators.compiler_flag,
+                dont_inherit=0, _genframe=None):
+        """ return compiled code object. if filename is None
+            invent an artificial filename which displays
+            the source/line position of the caller frame.
+        """
+        if not filename or py.path.local(filename).check(file=0):
+            if _genframe is None:
+                _genframe = sys._getframe(1) # the caller
+            fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
+            if not filename:
+                filename = '<codegen %s:%d>' % (fn, lineno)
+            else:
+                filename = '<codegen %r %s:%d>' % (filename, fn, lineno)
+        source = "\n".join(self.lines) + '\n'
+        try:
+            co = cpy_compile(source, filename, mode, flag)
+        except SyntaxError:
+            ex = sys.exc_info()[1]
+            # re-represent syntax errors from parsing python strings
+            msglines = self.lines[:ex.lineno]
+            if ex.offset:
+                msglines.append(" "*ex.offset + '^')
+            msglines.append("syntax error probably generated here: %s" % filename)
+            newex = SyntaxError('\n'.join(msglines))
+            newex.offset = ex.offset
+            newex.lineno = ex.lineno
+            newex.text = ex.text
+            raise newex
+        else:
+            if flag & _AST_FLAG:
+                return co
+            co_filename = MyStr(filename)
+            co_filename.__source__ = self
+            return py.code.Code(co).new(rec=1, co_filename=co_filename)
+            #return newcode_withfilename(co, co_filename)
+
+#
+# public API shortcut functions
+#
+
+def compile_(source, filename=None, mode='exec', flags=
+            generators.compiler_flag, dont_inherit=0):
+    """ compile the given source to a raw code object,
+        which points back to the source code through
+        "co_filename.__source__". All code objects
+        contained in the code object will recursively
+        also have this special subclass-of-string
+        filename.
+    """
+    if _ast is not None and isinstance(source, _ast.AST):
+        # XXX should Source support having AST?
+        return cpy_compile(source, filename, mode, flags, dont_inherit)
+    _genframe = sys._getframe(1) # the caller
+    s = Source(source)
+    co = s.compile(filename, mode, flags, _genframe=_genframe)
+    return co
+
+
+def getfslineno(obj):
+    try:
+        code = py.code.Code(obj)
+    except TypeError:
+        # fallback to
+        fn = (py.std.inspect.getsourcefile(obj) or
+              py.std.inspect.getfile(obj))
+        fspath = fn and py.path.local(fn) or None
+        if fspath:
+            try:
+                _, lineno = findsource(obj)
+            except IOError:
+                lineno = None
+        else:
+            lineno = None
+    else:
+        fspath = code.path
+        lineno = code.firstlineno
+    return fspath, lineno
+
+#
+# helper functions
+#
+class MyStr(str):
+    """ custom string which allows to add attributes.
""" + +def findsource(obj): + obj = py.code.getrawcode(obj) + try: + fullsource = obj.co_filename.__source__ + except AttributeError: + try: + sourcelines, lineno = py.std.inspect.findsource(obj) + except (KeyboardInterrupt, SystemExit): + raise + except: + return None, None + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + else: + lineno = obj.co_firstlineno - 1 + return fullsource, lineno + + +def getsource(obj, **kwargs): + obj = py.code.getrawcode(obj) + try: + fullsource = obj.co_filename.__source__ + except AttributeError: + try: + strsrc = inspect.getsource(obj) + except IndentationError: + strsrc = "\"Buggy python version consider upgrading, cannot get source\"" + assert isinstance(strsrc, str) + return Source(strsrc, **kwargs) + else: + lineno = obj.co_firstlineno - 1 + end = fullsource.getblockend(lineno) + return Source(fullsource[lineno:end+1], deident=True) + + +def deindent(lines, offset=None): + if offset is None: + for line in lines: + line = line.expandtabs() + s = line.lstrip() + if s: + offset = len(line)-len(s) + break + else: + offset = 0 + if offset == 0: + return list(lines) + newlines = [] + def readline_generator(lines): + for line in lines: + yield line + '\n' + while True: + yield '' + + r = readline_generator(lines) + try: + readline = r.next + except AttributeError: + readline = r.__next__ + + try: + for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(readline): + if sline > len(lines): + break # End of input reached + if sline > len(newlines): + line = lines[sline - 1].expandtabs() + if line.lstrip() and line[:offset].isspace(): + line = line[offset:] # Deindent + newlines.append(line) + + for i in range(sline, eline): + # Don't deindent continuing lines of + # multiline tokens (i.e. multiline strings) + newlines.append(lines[i]) + except (IndentationError, tokenize.TokenError): + pass + # Add any lines we didn't see. E.g. if an exception was raised. 
+ newlines.extend(lines[len(newlines):]) + return newlines --- /dev/null +++ b/py/_plugin/pytest__pytest.py @@ -0,0 +1,97 @@ +import py + +from py._test.pluginmanager import HookRelay + +def pytest_funcarg___pytest(request): + return PytestArg(request) + +class PytestArg: + def __init__(self, request): + self.request = request + + def gethookrecorder(self, hook): + hookrecorder = HookRecorder(hook._registry) + hookrecorder.start_recording(hook._hookspecs) + self.request.addfinalizer(hookrecorder.finish_recording) + return hookrecorder + +class ParsedCall: + def __init__(self, name, locals): + assert '_name' not in locals + self.__dict__.update(locals) + self.__dict__.pop('self') + self._name = name + + def __repr__(self): + d = self.__dict__.copy() + del d['_name'] + return "" %(self._name, d) + +class HookRecorder: + def __init__(self, registry): + self._registry = registry + self.calls = [] + self._recorders = {} + + def start_recording(self, hookspecs): + assert hookspecs not in self._recorders + class RecordCalls: + _recorder = self + for name, method in vars(hookspecs).items(): + if name[0] != "_": + setattr(RecordCalls, name, self._makecallparser(method)) + recorder = RecordCalls() + self._recorders[hookspecs] = recorder + self._registry.register(recorder) + self.hook = HookRelay(hookspecs, registry=self._registry) + + def finish_recording(self): + for recorder in self._recorders.values(): + self._registry.unregister(recorder) + self._recorders.clear() + + def _makecallparser(self, method): + name = method.__name__ + args, varargs, varkw, default = py.std.inspect.getargspec(method) + if not args or args[0] != "self": + args.insert(0, 'self') + fspec = py.std.inspect.formatargspec(args, varargs, varkw, default) + # we use exec because we want to have early type + # errors on wrong input arguments, using + # *args/**kwargs delays this and gives errors + # elsewhere + exec (py.code.compile(""" + def %(name)s%(fspec)s: + self._recorder.calls.append( + ParsedCall(%(name)r, locals())) + """ % locals())) + return locals()[name] + + def getcalls(self, names): + if isinstance(names, str): + names = names.split() + for name in names: + for cls in self._recorders: + if name in vars(cls): + break + else: + raise ValueError("callname %r not found in %r" %( + name, self._recorders.keys())) + l = [] + for call in self.calls: + if call._name in names: + l.append(call) + return l + + def popcall(self, name): + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + raise ValueError("could not find call %r" %(name, )) + + def getcall(self, name): + l = self.getcalls(name) + assert len(l) == 1, (name, l) + return l[0] + --- /dev/null +++ b/py/_code/__init__.py @@ -0,0 +1,1 @@ +""" python inspection/code generation API """ --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -254,7 +254,7 @@ class PluginDoc(RestWriter): warn("missing docstring", func) def emit_options(self, plugin): - from py.impl.test.parseopt import Parser + from py._test.parseopt import Parser options = [] parser = Parser(processopt=options.append) if hasattr(plugin, 'pytest_addoption'): --- /dev/null +++ b/py/_plugin/pytest_helpconfig.py @@ -0,0 +1,159 @@ +""" provide version info, conftest/environment config names. 
+""" +import py +import inspect, sys + +def pytest_addoption(parser): + group = parser.getgroup('debugconfig') + group.addoption('--version', action="store_true", + help="display py lib version and import information.") + group._addoption('-p', action="append", dest="plugins", default = [], + metavar="name", + help="early-load given plugin (multi-allowed).") + group.addoption('--traceconfig', + action="store_true", dest="traceconfig", default=False, + help="trace considerations of conftest.py files."), + group._addoption('--nomagic', + action="store_true", dest="nomagic", default=False, + help="don't reinterpret asserts, no traceback cutting. ") + group.addoption('--debug', + action="store_true", dest="debug", default=False, + help="generate and show internal debugging information.") + group.addoption("--help-config", action="store_true", dest="helpconfig", + help="show available conftest.py and ENV-variable names.") + + +def pytest_configure(__multicall__, config): + if config.option.version: + p = py.path.local(py.__file__).dirpath() + sys.stderr.write("This is py.test version %s, imported from %s\n" % + (py.__version__, p)) + sys.exit(0) + if not config.option.helpconfig: + return + __multicall__.execute() + options = [] + for group in config._parser._groups: + options.extend(group.options) + widths = [0] * 10 + tw = py.io.TerminalWriter() + tw.sep("-") + tw.line("%-13s | %-18s | %-25s | %s" %( + "cmdline name", "conftest.py name", "ENV-variable name", "help")) + tw.sep("-") + + options = [opt for opt in options if opt._long_opts] + options.sort(key=lambda x: x._long_opts) + for opt in options: + if not opt._long_opts: + continue + optstrings = list(opt._long_opts) # + list(opt._short_opts) + optstrings = filter(None, optstrings) + optstring = "|".join(optstrings) + line = "%-13s | %-18s | %-25s | %s" %( + optstring, + "option_%s" % opt.dest, + "PYTEST_OPTION_%s" % opt.dest.upper(), + opt.help and opt.help or "", + ) + tw.line(line[:tw.fullwidth]) + for name, help in conftest_options: + line = "%-13s | %-18s | %-25s | %s" %( + "", + name, + "", + help, + ) + tw.line(line[:tw.fullwidth]) + + tw.sep("-") + sys.exit(0) + +conftest_options = ( + ('pytest_plugins', 'list of plugin names to load'), + ('collect_ignore', '(relative) paths ignored during collection'), + ('rsyncdirs', 'to-be-rsynced directories for dist-testing'), +) + +def pytest_report_header(config): + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append("using py lib: %s" % (py.path.local(py.__file__).dirpath())) + if config.option.traceconfig: + lines.append("active plugins:") + plugins = [] + items = config.pluginmanager._name2plugin.items() + for name, plugin in items: + lines.append(" %-20s: %s" %(name, repr(plugin))) + return lines + + +# ===================================================== +# validate plugin syntax and hooks +# ===================================================== + +def pytest_plugin_registered(manager, plugin): + hookspec = manager.hook._hookspecs + methods = collectattr(plugin) + hooks = collectattr(hookspec) + stringio = py.io.TextIO() + def Print(*args): + if args: + stringio.write(" ".join(map(str, args))) + stringio.write("\n") + + fail = False + while methods: + name, method = methods.popitem() + #print "checking", name + if isgenerichook(name): + continue + if name not in hooks: + Print("found unknown hook:", name) + fail = True + else: + method_args = getargs(method) + if '__multicall__' in method_args: + method_args.remove('__multicall__') + hook = hooks[name] + 
hookargs = getargs(hook) + for arg in method_args: + if arg not in hookargs: + Print("argument %r not available" %(arg, )) + Print("actual definition: %s" %(formatdef(method))) + Print("available hook arguments: %s" % + ", ".join(hookargs)) + fail = True + break + #if not fail: + # print "matching hook:", formatdef(method) + if fail: + name = getattr(plugin, '__name__', plugin) + raise PluginValidationError("%s:\n%s" %(name, stringio.getvalue())) + +class PluginValidationError(Exception): + """ plugin failed validation. """ + +def isgenerichook(name): + return name == "pytest_plugins" or \ + name.startswith("pytest_funcarg__") + +def getargs(func): + args = inspect.getargs(py.code.getrawcode(func))[0] + startindex = inspect.ismethod(func) and 1 or 0 + return args[startindex:] + +def collectattr(obj, prefixes=("pytest_",)): + methods = {} + for apiname in dir(obj): + for prefix in prefixes: + if apiname.startswith(prefix): + methods[apiname] = getattr(obj, apiname) + return methods + +def formatdef(func): + return "%s%s" %( + func.__name__, + inspect.formatargspec(*inspect.getargspec(func)) + ) + --- /dev/null +++ b/py/_code/assertion.py @@ -0,0 +1,75 @@ +import sys +import py + +BuiltinAssertionError = py.builtin.builtins.AssertionError + + +def _format_explanation(explanation): + # uck! See CallFunc for where \n{ and \n} escape sequences are used + raw_lines = (explanation or '').split('\n') + # escape newlines not followed by { and } + lines = [raw_lines[0]] + for l in raw_lines[1:]: + if l.startswith('{') or l.startswith('}'): + lines.append(l) + else: + lines[-1] += '\\n' + l + + result = lines[:1] + stack = [0] + stackcnt = [0] + for line in lines[1:]: + if line.startswith('{'): + if stackcnt[-1]: + s = 'and ' + else: + s = 'where ' + stack.append(len(result)) + stackcnt[-1] += 1 + stackcnt.append(0) + result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + else: + assert line.startswith('}') + stack.pop() + stackcnt.pop() + result[stack[-1]] += line[1:] + assert len(stack) == 1 + return '\n'.join(result) + + +if sys.version_info >= (2, 6) or (sys.platform.startswith("java")): + from py._code._assertionnew import interpret +else: + from py._code._assertionold import interpret + + +class AssertionError(BuiltinAssertionError): + + def __init__(self, *args): + BuiltinAssertionError.__init__(self, *args) + if args: + try: + self.msg = str(args[0]) + except (KeyboardInterrupt, SystemExit): + raise + except: + self.msg = "<[broken __repr__] %s at %0xd>" %( + args[0].__class__, id(args[0])) + else: + f = py.code.Frame(sys._getframe(1)) + try: + source = f.statement + source = str(source.deindent()).strip() + except py.error.ENOENT: + source = None + # this can also occur during reinterpretation, when the + # co_filename is set to "". + if source: + self.msg = interpret(source, f, should_fail=True) + if not self.args: + self.args = (self.msg,) + else: + self.msg = None + +if sys.version_info > (3, 0): + AssertionError.__module__ = "builtins" --- /dev/null +++ b/py/_error.py @@ -0,0 +1,83 @@ +""" +create errno-specific classes for IO or os calls. 
+ +""" +import sys, os, errno + +class Error(EnvironmentError): + def __repr__(self): + return "%s.%s %r: %s " %(self.__class__.__module__, + self.__class__.__name__, + self.__class__.__doc__, + " ".join(map(str, self.args)), + #repr(self.args) + ) + + def __str__(self): + s = "[%s]: %s" %(self.__class__.__doc__, + " ".join(map(str, self.args)), + ) + return s + +_winerrnomap = { + 2: errno.ENOENT, + 3: errno.ENOENT, + 17: errno.EEXIST, + 22: errno.ENOTDIR, + 267: errno.ENOTDIR, + 5: errno.EACCES, # anything better? +} + +class ErrorMaker(object): + """ lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. + """ + Error = Error + _errno2class = {} + + def __getattr__(self, name): + eno = getattr(errno, name) + cls = self._geterrnoclass(eno) + setattr(self, name, cls) + return cls + + def _geterrnoclass(self, eno): + try: + return self._errno2class[eno] + except KeyError: + clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,)) + errorcls = type(Error)(clsname, (Error,), + {'__module__':'py.error', + '__doc__': os.strerror(eno)}) + self._errno2class[eno] = errorcls + return errorcls + + def checked_call(self, func, *args): + """ call a function and raise an errno-exception if applicable. """ + __tracebackhide__ = True + try: + return func(*args) + except self.Error: + raise + except EnvironmentError: + cls, value, tb = sys.exc_info() + if not hasattr(value, 'errno'): + raise + __tracebackhide__ = False + errno = value.errno + try: + if not isinstance(value, WindowsError): + raise NameError + except NameError: + # we are not on Windows, or we got a proper OSError + cls = self._geterrnoclass(errno) + else: + try: + cls = self._geterrnoclass(_winerrnomap[errno]) + except KeyError: + raise value + raise cls("%s%r" % (func.__name__, args)) + __tracebackhide__ = True + +error = ErrorMaker() --- a/doc/confrest.py +++ b/doc/confrest.py @@ -1,6 +1,6 @@ import py -from py.plugin.pytest_restdoc import convert_rest_html, strip_html_header +from py._plugin.pytest_restdoc import convert_rest_html, strip_html_header html = py.xml.html --- a/py/__init__.py +++ b/py/__init__.py @@ -15,164 +15,164 @@ import py.apipkg py.apipkg.initpkg(__name__, dict( # access to all standard lib modules - std = '.impl.std:std', + std = '._std:std', # access to all posix errno's as classes - error = '.impl.error:error', + error = '._error:error', - _pydir = '.impl._metainfo:pydir', + _pydir = '.__metainfo:pydir', version = 'py:__version__', # backward compatibility cmdline = { - 'pytest': '.impl.cmdline.pytest:main', - 'pylookup': '.impl.cmdline.pylookup:main', - 'pycountloc': '.impl.cmdline.pycountlog:main', - 'pytest': '.impl.test.cmdline:main', - 'pylookup': '.impl.cmdline.pylookup:main', - 'pycountloc': '.impl.cmdline.pycountloc:main', - 'pycleanup': '.impl.cmdline.pycleanup:main', - 'pywhich' : '.impl.cmdline.pywhich:main', - 'pysvnwcrevert' : '.impl.cmdline.pysvnwcrevert:main', - 'pyconvert_unittest' : '.impl.cmdline.pyconvert_unittest:main', + 'pytest': '._cmdline.pytest:main', + 'pylookup': '._cmdline.pylookup:main', + 'pycountloc': '._cmdline.pycountlog:main', + 'pytest': '._test.cmdline:main', + 'pylookup': '._cmdline.pylookup:main', + 'pycountloc': '._cmdline.pycountloc:main', + 'pycleanup': '._cmdline.pycleanup:main', + 'pywhich' : '._cmdline.pywhich:main', + 'pysvnwcrevert' : '._cmdline.pysvnwcrevert:main', + 'pyconvert_unittest' : '._cmdline.pyconvert_unittest:main', }, test = { # helpers for use from test 
functions or collectors - '__onfirstaccess__' : '.impl.test.config:onpytestaccess', - '__doc__' : '.impl.test:__doc__', - 'raises' : '.impl.test.outcome:raises', - 'skip' : '.impl.test.outcome:skip', - 'importorskip' : '.impl.test.outcome:importorskip', - 'fail' : '.impl.test.outcome:fail', - 'exit' : '.impl.test.outcome:exit', + '__onfirstaccess__' : '._test.config:onpytestaccess', + '__doc__' : '._test:__doc__', + 'raises' : '._test.outcome:raises', + 'skip' : '._test.outcome:skip', + 'importorskip' : '._test.outcome:importorskip', + 'fail' : '._test.outcome:fail', + 'exit' : '._test.outcome:exit', # configuration/initialization related test api - 'config' : '.impl.test.config:config_per_process', - 'ensuretemp' : '.impl.test.config:ensuretemp', + 'config' : '._test.config:config_per_process', + 'ensuretemp' : '._test.config:ensuretemp', 'collect': { - 'Collector' : '.impl.test.collect:Collector', - 'Directory' : '.impl.test.collect:Directory', - 'File' : '.impl.test.collect:File', - 'Item' : '.impl.test.collect:Item', - 'Module' : '.impl.test.pycollect:Module', - 'Class' : '.impl.test.pycollect:Class', - 'Instance' : '.impl.test.pycollect:Instance', - 'Generator' : '.impl.test.pycollect:Generator', - 'Function' : '.impl.test.pycollect:Function', - '_fillfuncargs' : '.impl.test.funcargs:fillfuncargs', + 'Collector' : '._test.collect:Collector', + 'Directory' : '._test.collect:Directory', + 'File' : '._test.collect:File', + 'Item' : '._test.collect:Item', + 'Module' : '._test.pycollect:Module', + 'Class' : '._test.pycollect:Class', + 'Instance' : '._test.pycollect:Instance', + 'Generator' : '._test.pycollect:Generator', + 'Function' : '._test.pycollect:Function', + '_fillfuncargs' : '._test.funcargs:fillfuncargs', }, 'cmdline': { - 'main' : '.impl.test.cmdline:main', # backward compat + 'main' : '._test.cmdline:main', # backward compat }, }, # hook into the top-level standard library process = { - '__doc__' : '.impl.process:__doc__', - 'cmdexec' : '.impl.process.cmdexec:cmdexec', - 'kill' : '.impl.process.killproc:kill', - 'ForkedFunc' : '.impl.process.forkedfunc:ForkedFunc', + '__doc__' : '._process:__doc__', + 'cmdexec' : '._process.cmdexec:cmdexec', + 'kill' : '._process.killproc:kill', + 'ForkedFunc' : '._process.forkedfunc:ForkedFunc', }, path = { - '__doc__' : '.impl.path:__doc__', - 'svnwc' : '.impl.path.svnwc:SvnWCCommandPath', - 'svnurl' : '.impl.path.svnurl:SvnCommandPath', - 'local' : '.impl.path.local:LocalPath', - 'SvnAuth' : '.impl.path.svnwc:SvnAuth', + '__doc__' : '._path:__doc__', + 'svnwc' : '._path.svnwc:SvnWCCommandPath', + 'svnurl' : '._path.svnurl:SvnCommandPath', + 'local' : '._path.local:LocalPath', + 'SvnAuth' : '._path.svnwc:SvnAuth', }, # some nice slightly magic APIs magic = { - 'invoke' : '.impl.code.oldmagic:invoke', - 'revoke' : '.impl.code.oldmagic:revoke', - 'patch' : '.impl.code.oldmagic:patch', - 'revert' : '.impl.code.oldmagic:revert', - 'autopath' : '.impl.path.local:autopath', - 'AssertionError' : '.impl.code.oldmagic2:AssertionError', + 'invoke' : '._code.oldmagic:invoke', + 'revoke' : '._code.oldmagic:revoke', + 'patch' : '._code.oldmagic:patch', + 'revert' : '._code.oldmagic:revert', + 'autopath' : '._path.local:autopath', + 'AssertionError' : '._code.oldmagic2:AssertionError', }, # python inspection/code-generation API code = { - '__doc__' : '.impl.code:__doc__', - 'compile' : '.impl.code.source:compile_', - 'Source' : '.impl.code.source:Source', - 'Code' : '.impl.code.code:Code', - 'Frame' : '.impl.code.code:Frame', - 'ExceptionInfo' : 
'.impl.code.code:ExceptionInfo', - 'Traceback' : '.impl.code.code:Traceback', - 'getfslineno' : '.impl.code.source:getfslineno', - 'getrawcode' : '.impl.code.code:getrawcode', - 'patch_builtins' : '.impl.code.code:patch_builtins', - 'unpatch_builtins' : '.impl.code.code:unpatch_builtins', - '_AssertionError' : '.impl.code.assertion:AssertionError', + '__doc__' : '._code:__doc__', + 'compile' : '._code.source:compile_', + 'Source' : '._code.source:Source', + 'Code' : '._code.code:Code', + 'Frame' : '._code.code:Frame', + 'ExceptionInfo' : '._code.code:ExceptionInfo', + 'Traceback' : '._code.code:Traceback', + 'getfslineno' : '._code.source:getfslineno', + 'getrawcode' : '._code.code:getrawcode', + 'patch_builtins' : '._code.code:patch_builtins', + 'unpatch_builtins' : '._code.code:unpatch_builtins', + '_AssertionError' : '._code.assertion:AssertionError', }, # backports and additions of builtins builtin = { - '__doc__' : '.impl.builtin:__doc__', - 'enumerate' : '.impl.builtin:enumerate', - 'reversed' : '.impl.builtin:reversed', - 'sorted' : '.impl.builtin:sorted', - 'set' : '.impl.builtin:set', - 'frozenset' : '.impl.builtin:frozenset', - 'BaseException' : '.impl.builtin:BaseException', - 'GeneratorExit' : '.impl.builtin:GeneratorExit', - 'print_' : '.impl.builtin:print_', - '_reraise' : '.impl.builtin:_reraise', - '_tryimport' : '.impl.builtin:_tryimport', - 'exec_' : '.impl.builtin:exec_', - '_basestring' : '.impl.builtin:_basestring', - '_totext' : '.impl.builtin:_totext', - '_isbytes' : '.impl.builtin:_isbytes', - '_istext' : '.impl.builtin:_istext', - '_getimself' : '.impl.builtin:_getimself', - '_getfuncdict' : '.impl.builtin:_getfuncdict', - 'builtins' : '.impl.builtin:builtins', - 'execfile' : '.impl.builtin:execfile', - 'callable' : '.impl.builtin:callable', + '__doc__' : '._builtin:__doc__', + 'enumerate' : '._builtin:enumerate', + 'reversed' : '._builtin:reversed', + 'sorted' : '._builtin:sorted', + 'set' : '._builtin:set', + 'frozenset' : '._builtin:frozenset', + 'BaseException' : '._builtin:BaseException', + 'GeneratorExit' : '._builtin:GeneratorExit', + 'print_' : '._builtin:print_', + '_reraise' : '._builtin:_reraise', + '_tryimport' : '._builtin:_tryimport', + 'exec_' : '._builtin:exec_', + '_basestring' : '._builtin:_basestring', + '_totext' : '._builtin:_totext', + '_isbytes' : '._builtin:_isbytes', + '_istext' : '._builtin:_istext', + '_getimself' : '._builtin:_getimself', + '_getfuncdict' : '._builtin:_getfuncdict', + 'builtins' : '._builtin:builtins', + 'execfile' : '._builtin:execfile', + 'callable' : '._builtin:callable', }, # input-output helping io = { - '__doc__' : '.impl.io:__doc__', - 'dupfile' : '.impl.io.capture:dupfile', - 'TextIO' : '.impl.io.capture:TextIO', - 'BytesIO' : '.impl.io.capture:BytesIO', - 'FDCapture' : '.impl.io.capture:FDCapture', - 'StdCapture' : '.impl.io.capture:StdCapture', - 'StdCaptureFD' : '.impl.io.capture:StdCaptureFD', - 'TerminalWriter' : '.impl.io.terminalwriter:TerminalWriter', + '__doc__' : '._io:__doc__', + 'dupfile' : '._io.capture:dupfile', + 'TextIO' : '._io.capture:TextIO', + 'BytesIO' : '._io.capture:BytesIO', + 'FDCapture' : '._io.capture:FDCapture', + 'StdCapture' : '._io.capture:StdCapture', + 'StdCaptureFD' : '._io.capture:StdCaptureFD', + 'TerminalWriter' : '._io.terminalwriter:TerminalWriter', }, # small and mean xml/html generation xml = { - '__doc__' : '.impl.xmlgen:__doc__', - 'html' : '.impl.xmlgen:html', - 'Tag' : '.impl.xmlgen:Tag', - 'raw' : '.impl.xmlgen:raw', - 'Namespace' : '.impl.xmlgen:Namespace', - 
'escape' : '.impl.xmlgen:escape', + '__doc__' : '._xmlgen:__doc__', + 'html' : '._xmlgen:html', + 'Tag' : '._xmlgen:Tag', + 'raw' : '._xmlgen:raw', + 'Namespace' : '._xmlgen:Namespace', + 'escape' : '._xmlgen:escape', }, log = { # logging API ('producers' and 'consumers' connected via keywords) - '__doc__' : '.impl.log:__doc__', - '_apiwarn' : '.impl.log.warning:_apiwarn', - 'Producer' : '.impl.log.log:Producer', - 'setconsumer' : '.impl.log.log:setconsumer', - '_setstate' : '.impl.log.log:setstate', - '_getstate' : '.impl.log.log:getstate', - 'Path' : '.impl.log.log:Path', - 'STDOUT' : '.impl.log.log:STDOUT', - 'STDERR' : '.impl.log.log:STDERR', - 'Syslog' : '.impl.log.log:Syslog', + '__doc__' : '._log:__doc__', + '_apiwarn' : '._log.warning:_apiwarn', + 'Producer' : '._log.log:Producer', + 'setconsumer' : '._log.log:setconsumer', + '_setstate' : '._log.log:setstate', + '_getstate' : '._log.log:getstate', + 'Path' : '._log.log:Path', + 'STDOUT' : '._log.log:STDOUT', + 'STDERR' : '._log.log:STDERR', + 'Syslog' : '._log.log:Syslog', }, # compatibility modules (deprecated) compat = { - '__doc__' : '.impl.compat:__doc__', - 'doctest' : '.impl.compat.dep_doctest:doctest', - 'optparse' : '.impl.compat.dep_optparse:optparse', - 'textwrap' : '.impl.compat.dep_textwrap:textwrap', - 'subprocess' : '.impl.compat.dep_subprocess:subprocess', + '__doc__' : '._compat:__doc__', + 'doctest' : '._compat.dep_doctest:doctest', + 'optparse' : '._compat.dep_optparse:optparse', + 'textwrap' : '._compat.dep_textwrap:textwrap', + 'subprocess' : '._compat.dep_subprocess:subprocess', }, )) --- /dev/null +++ b/py/_path/gateway/__init__.py @@ -0,0 +1,1 @@ +# --- /dev/null +++ b/py/_cmdline/pylookup.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +"""\ +py.lookup [search_directory] SEARCH_STRING [options] + +Looks recursively at Python files for a SEARCH_STRING, starting from the +present working directory. 
Prints the line, with the filename and line-number +prepended.""" + +import sys, os +import py +from py._io.terminalwriter import ansi_print, terminal_width +import re + +def rec(p): + return p.check(dotfile=0) + +parser = py.std.optparse.OptionParser(usage=__doc__) +parser.add_option("-i", "--ignore-case", action="store_true", dest="ignorecase", + help="ignore case distinctions") +parser.add_option("-C", "--context", action="store", type="int", dest="context", + default=0, help="How many lines of output to show") + +def find_indexes(search_line, string): + indexes = [] + before = 0 + while 1: + i = search_line.find(string, before) + if i == -1: + break + indexes.append(i) + before = i + len(string) + return indexes + +def main(): + (options, args) = parser.parse_args() + if len(args) == 2: + search_dir, string = args + search_dir = py.path.local(search_dir) + else: + search_dir = py.path.local() + string = args[0] + if options.ignorecase: + string = string.lower() + for x in search_dir.visit('*.py', rec): + # match filename directly + s = x.relto(search_dir) + if options.ignorecase: + s = s.lower() + if s.find(string) != -1: + sys.stdout.write("%s: filename matches %r" %(x, string) + "\n") + + try: + s = x.read() + except py.error.ENOENT: + pass # whatever, probably broken link (ie emacs lock) + searchs = s + if options.ignorecase: + searchs = s.lower() + if s.find(string) != -1: + lines = s.splitlines() + if options.ignorecase: + searchlines = s.lower().splitlines() + else: + searchlines = lines + for i, (line, searchline) in enumerate(zip(lines, searchlines)): + indexes = find_indexes(searchline, string) + if not indexes: + continue + if not options.context: + sys.stdout.write("%s:%d: " %(x.relto(search_dir), i+1)) + last_index = 0 + for index in indexes: + sys.stdout.write(line[last_index: index]) + ansi_print(line[index: index+len(string)], + file=sys.stdout, esc=31, newline=False) + last_index = index + len(string) + sys.stdout.write(line[last_index:] + "\n") + else: + context = (options.context)/2 + for count in range(max(0, i-context), min(len(lines) - 1, i+context+1)): + print("%s:%d: %s" %(x.relto(search_dir), count+1, lines[count].rstrip())) + print("-" * terminal_width) --- /dev/null +++ b/py/_path/svnurl.py @@ -0,0 +1,376 @@ +""" +module defining a subversion path object based on the external +command 'svn'. This modules aims to work with svn 1.3 and higher +but might also interact well with earlier versions. +""" + +import os, sys, time, re +import py +from py import path, process +from py._path import common +from py._path import svnwc as svncommon +from py._path.cacheutil import BuildcostAccessCache, AgingCache + +DEBUG=False + +class SvnCommandPath(svncommon.SvnPathBase): + """ path implementation that offers access to (possibly remote) subversion + repositories. 
""" + + _lsrevcache = BuildcostAccessCache(maxentries=128) + _lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0) + + def __new__(cls, path, rev=None, auth=None): + self = object.__new__(cls) + if isinstance(path, cls): + rev = path.rev + auth = path.auth + path = path.strpath + svncommon.checkbadchars(path) + path = path.rstrip('/') + self.strpath = path + self.rev = rev + self.auth = auth + return self + + def __repr__(self): + if self.rev == -1: + return 'svnurl(%r)' % self.strpath + else: + return 'svnurl(%r, %r)' % (self.strpath, self.rev) + + def _svnwithrev(self, cmd, *args): + """ execute an svn command, append our own url and revision """ + if self.rev is None: + return self._svnwrite(cmd, *args) + else: + args = ['-r', self.rev] + list(args) + return self._svnwrite(cmd, *args) + + def _svnwrite(self, cmd, *args): + """ execute an svn command, append our own url """ + l = ['svn %s' % cmd] + args = ['"%s"' % self._escape(item) for item in args] + l.extend(args) + l.append('"%s"' % self._encodedurl()) + # fixing the locale because we can't otherwise parse + string = " ".join(l) + if DEBUG: + print("execing %s" % string) + out = self._svncmdexecauth(string) + return out + + def _svncmdexecauth(self, cmd): + """ execute an svn command 'as is' """ + cmd = svncommon.fixlocale() + cmd + if self.auth is not None: + cmd += ' ' + self.auth.makecmdoptions() + return self._cmdexec(cmd) + + def _cmdexec(self, cmd): + try: + out = process.cmdexec(cmd) + except py.process.cmdexec.Error: + e = sys.exc_info()[1] + if (e.err.find('File Exists') != -1 or + e.err.find('File already exists') != -1): + raise py.error.EEXIST(self) + raise + return out + + def _svnpopenauth(self, cmd): + """ execute an svn command, return a pipe for reading stdin """ + cmd = svncommon.fixlocale() + cmd + if self.auth is not None: + cmd += ' ' + self.auth.makecmdoptions() + return self._popen(cmd) + + def _popen(self, cmd): + return os.popen(cmd) + + def _encodedurl(self): + return self._escape(self.strpath) + + def _norev_delentry(self, path): + auth = self.auth and self.auth.makecmdoptions() or None + self._lsnorevcache.delentry((str(path), auth)) + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + if mode not in ("r", "rU",): + raise ValueError("mode %r not supported" % (mode,)) + assert self.check(file=1) # svn cat returns an empty file otherwise + if self.rev is None: + return self._svnpopenauth('svn cat "%s"' % ( + self._escape(self.strpath), )) + else: + return self._svnpopenauth('svn cat -r %s "%s"' % ( + self.rev, self._escape(self.strpath))) + + def dirpath(self, *args, **kwargs): + """ return the directory path of the current path joined + with any given path arguments. + """ + l = self.strpath.split(self.sep) + if len(l) < 4: + raise py.error.EINVAL(self, "base is not valid") + elif len(l) == 4: + return self.join(*args, **kwargs) + else: + return self.new(basename='').join(*args, **kwargs) + + # modifying methods (cache must be invalidated) + def mkdir(self, *args, **kwargs): + """ create & return the directory joined with args. + pass a 'msg' keyword argument to set the commit message. 
+ """ + commit_msg = kwargs.get('msg', "mkdir by py lib invocation") + createpath = self.join(*args) + createpath._svnwrite('mkdir', '-m', commit_msg) + self._norev_delentry(createpath.dirpath()) + return createpath + + def copy(self, target, msg='copied by py lib invocation'): + """ copy path to target with checkin message msg.""" + if getattr(target, 'rev', None) is not None: + raise py.error.EINVAL(target, "revisions are immutable") + self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg, + self._escape(self), self._escape(target))) + self._norev_delentry(target.dirpath()) + + def rename(self, target, msg="renamed by py lib invocation"): + """ rename this path to target with checkin message msg. """ + if getattr(self, 'rev', None) is not None: + raise py.error.EINVAL(self, "revisions are immutable") + self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %( + msg, self._escape(self), self._escape(target))) + self._norev_delentry(self.dirpath()) + self._norev_delentry(self) + + def remove(self, rec=1, msg='removed by py lib invocation'): + """ remove a file or directory (or a directory tree if rec=1) with +checkin message msg.""" + if self.rev is not None: + raise py.error.EINVAL(self, "revisions are immutable") + self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self))) + self._norev_delentry(self.dirpath()) + + def export(self, topath): + """ export to a local path + + topath should not exist prior to calling this, returns a + py.path.local instance + """ + topath = py.path.local(topath) + args = ['"%s"' % (self._escape(self),), + '"%s"' % (self._escape(topath),)] + if self.rev is not None: + args = ['-r', str(self.rev)] + args + self._svncmdexecauth('svn export %s' % (' '.join(args),)) + return topath + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). If you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + if getattr(self, 'rev', None) is not None: + raise py.error.EINVAL(self, "revisions are immutable") + target = self.join(*args) + dir = kwargs.get('dir', 0) + for x in target.parts(reverse=True): + if x.check(): + break + else: + raise py.error.ENOENT(target, "has not any valid base!") + if x == target: + if not x.check(dir=dir): + raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x) + return x + tocreate = target.relto(x) + basename = tocreate.split(self.sep, 1)[0] + tempdir = py.path.local.mkdtemp() + try: + tempdir.ensure(tocreate, dir=dir) + cmd = 'svn import -m "%s" "%s" "%s"' % ( + "ensure %s" % self._escape(tocreate), + self._escape(tempdir.join(basename)), + x.join(basename)._encodedurl()) + self._svncmdexecauth(cmd) + self._norev_delentry(x) + finally: + tempdir.remove() + return target + + # end of modifying methods + def _propget(self, name): + res = self._svnwithrev('propget', name) + return res[:-1] # strip trailing newline + + def _proplist(self): + res = self._svnwithrev('proplist') + lines = res.split('\n') + lines = [x.strip() for x in lines[1:]] + return svncommon.PropListDict(self, lines) + + def info(self): + """ return an Info structure with svn-provided information. 
""" + parent = self.dirpath() + nameinfo_seq = parent._listdir_nameinfo() + bn = self.basename + for name, info in nameinfo_seq: + if name == bn: + return info + raise py.error.ENOENT(self) + + + def _listdir_nameinfo(self): + """ return sequence of name-info directory entries of self """ + def builder(): + try: + res = self._svnwithrev('ls', '-v') + except process.cmdexec.Error: + e = sys.exc_info()[1] + if e.err.find('non-existent in that revision') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('File not found') != -1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('not part of a repository')!=-1: + raise py.error.ENOENT(self, e.err) + elif e.err.find('Unable to open')!=-1: + raise py.error.ENOENT(self, e.err) + elif e.err.lower().find('method not allowed')!=-1: + raise py.error.EACCES(self, e.err) + raise py.error.Error(e.err) + lines = res.split('\n') + nameinfo_seq = [] + for lsline in lines: + if lsline: + info = InfoSvnCommand(lsline) + if info._name != '.': # svn 1.5 produces '.' dirs, + nameinfo_seq.append((info._name, info)) + nameinfo_seq.sort() + return nameinfo_seq + auth = self.auth and self.auth.makecmdoptions() or None + if self.rev is not None: + return self._lsrevcache.getorbuild((self.strpath, self.rev, auth), + builder) + else: + return self._lsnorevcache.getorbuild((self.strpath, auth), + builder) + + def listdir(self, fil=None, sort=None): + """ list directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + nameinfo_seq = self._listdir_nameinfo() + if len(nameinfo_seq) == 1: + name, info = nameinfo_seq[0] + if name == self.basename and info.kind == 'file': + #if not self.check(dir=1): + raise py.error.ENOTDIR(self) + paths = [self.join(name) for (name, info) in nameinfo_seq] + if fil: + paths = [x for x in paths if fil(x)] + self._sortlist(paths, sort) + return paths + + + def log(self, rev_start=None, rev_end=1, verbose=False): + """ return a list of LogEntry instances for this path. +rev_start is the starting revision (defaulting to the first one). +rev_end is the last revision (defaulting to HEAD). +if verbose is True, then the LogEntry instances also know which files changed. +""" + assert self.check() #make it simpler for the pipe + rev_start = rev_start is None and "HEAD" or rev_start + rev_end = rev_end is None and "HEAD" or rev_end + + if rev_start == "HEAD" and rev_end == 1: + rev_opt = "" + else: + rev_opt = "-r %s:%s" % (rev_start, rev_end) + verbose_opt = verbose and "-v" or "" + xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' % + (rev_opt, verbose_opt, self.strpath)) + from xml.dom import minidom + tree = minidom.parse(xmlpipe) + result = [] + for logentry in filter(None, tree.firstChild.childNodes): + if logentry.nodeType == logentry.ELEMENT_NODE: + result.append(svncommon.LogEntry(logentry)) + return result + +#01234567890123456789012345678901234567890123467 +# 2256 hpk 165 Nov 24 17:55 __init__.py +# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!! +# 1312 johnny 1627 May 05 14:32 test_decorators.py +# +class InfoSvnCommand: + # the '0?' part in the middle is an indication of whether the resource is + # locked, see 'svn help ls' + lspattern = re.compile( + r'^ *(?P\d+) +(?P.+?) +(0? *(?P\d+))? ' + '*(?P\w+ +\d{2} +[\d:]+) +(?P.*)$') + def __init__(self, line): + # this is a typical line from 'svn ls http://...' 
+ #_ 1127 jum 0 Jul 13 15:28 branch/ + match = self.lspattern.match(line) + data = match.groupdict() + self._name = data['file'] + if self._name[-1] == '/': + self._name = self._name[:-1] + self.kind = 'dir' + else: + self.kind = 'file' + #self.has_props = l.pop(0) == 'P' + self.created_rev = int(data['rev']) + self.last_author = data['author'] + self.size = data['size'] and int(data['size']) or 0 + self.mtime = parse_time_with_missing_year(data['date']) + self.time = self.mtime * 1000000 + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + +#____________________________________________________ +# +# helper functions +#____________________________________________________ +def parse_time_with_missing_year(timestr): + """ analyze the time part from a single line of "svn ls -v" + the svn output doesn't show the year makes the 'timestr' + ambigous. + """ + import calendar + t_now = time.gmtime() + + tparts = timestr.split() + month = time.strptime(tparts.pop(0), '%b')[1] + day = time.strptime(tparts.pop(0), '%d')[2] + last = tparts.pop(0) # year or hour:minute + try: + year = time.strptime(last, '%Y')[0] + hour = minute = 0 + except ValueError: + hour, minute = time.strptime(last, '%H:%M')[3:5] + year = t_now[0] + + t_result = (year, month, day, hour, minute, 0,0,0,0) + if t_result > t_now: + year -= 1 + t_result = (year, month, day, hour, minute, 0,0,0,0) + return calendar.timegm(t_result) + +class PathEntry: + def __init__(self, ppart): + self.strpath = ppart.firstChild.nodeValue.encode('UTF-8') + self.action = ppart.getAttribute('action').encode('UTF-8') + if self.action == 'A': + self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8') + if self.copyfrom_path: + self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev')) + --- /dev/null +++ b/py/_path/local.py @@ -0,0 +1,799 @@ +""" +local path implementation. +""" +import sys, os, stat, re, atexit +import py +from py._path import common + +iswin32 = sys.platform == "win32" + +class Stat(object): + def __getattr__(self, name): + return getattr(self._osstatresult, "st_" + name) + + def __init__(self, path, osstatresult): + self.path = path + self._osstatresult = osstatresult + + def owner(self): + if iswin32: + raise NotImplementedError("XXX win32") + import pwd + entry = py.error.checked_call(pwd.getpwuid, self.uid) + return entry[0] + owner = property(owner, None, None, "owner of path") + + def group(self): + """ return group name of file. """ + if iswin32: + raise NotImplementedError("XXX win32") + import grp + entry = py.error.checked_call(grp.getgrgid, self.gid) + return entry[0] + group = property(group) + +class PosixPath(common.PathBase): + def chown(self, user, group, rec=0): + """ change ownership to the given user and group. + user and group may be specified by a number or + by a name. if rec is True change ownership + recursively. + """ + uid = getuserid(user) + gid = getgroupid(group) + if rec: + for x in self.visit(rec=lambda x: x.check(link=0)): + if x.check(link=0): + py.error.checked_call(os.chown, str(x), uid, gid) + py.error.checked_call(os.chown, str(self), uid, gid) + + def readlink(self): + """ return value of a symbolic link. """ + return py.error.checked_call(os.readlink, self.strpath) + + def mklinkto(self, oldname): + """ posix style hard link to another name. """ + py.error.checked_call(os.link, str(oldname), str(self)) + + def mksymlinkto(self, value, absolute=1): + """ create a symbolic link with the given value (pointing to another name). 
""" + if absolute: + py.error.checked_call(os.symlink, str(value), self.strpath) + else: + base = self.common(value) + # with posix local paths '/' is always a common base + relsource = self.__class__(value).relto(base) + reldest = self.relto(base) + n = reldest.count(self.sep) + target = self.sep.join(('..', )*n + (relsource, )) + py.error.checked_call(os.symlink, target, self.strpath) + + def samefile(self, other): + """ return True if other refers to the same stat object as self. """ + return py.error.checked_call(os.path.samefile, str(self), str(other)) + +def getuserid(user): + import pwd + if not isinstance(user, int): + user = pwd.getpwnam(user)[2] + return user + +def getgroupid(group): + import grp + if not isinstance(group, int): + group = grp.getgrnam(group)[2] + return group + +FSBase = not iswin32 and PosixPath or common.PathBase + +class LocalPath(FSBase): + """ object oriented interface to os.path and other local filesystem + related information. + """ + sep = os.sep + class Checkers(common.Checkers): + def _stat(self): + try: + return self._statcache + except AttributeError: + try: + self._statcache = self.path.stat() + except py.error.ELOOP: + self._statcache = self.path.lstat() + return self._statcache + + def dir(self): + return stat.S_ISDIR(self._stat().mode) + + def file(self): + return stat.S_ISREG(self._stat().mode) + + def exists(self): + return self._stat() + + def link(self): + st = self.path.lstat() + return stat.S_ISLNK(st.mode) + + def __new__(cls, path=None): + """ Initialize and return a local Path instance. + + Path can be relative to the current directory. + If it is None then the current working directory is taken. + Note that Path instances always carry an absolute path. + Note also that passing in a local path object will simply return + the exact same path object. Use new() to get a new copy. + """ + if isinstance(path, common.PathBase): + if path.__class__ == cls: + return path + path = path.strpath + # initialize the path + self = object.__new__(cls) + if not path: + self.strpath = os.getcwd() + elif isinstance(path, py.builtin._basestring): + self.strpath = os.path.abspath(os.path.normpath(str(path))) + else: + raise ValueError("can only pass None, Path instances " + "or non-empty strings to LocalPath") + assert isinstance(self.strpath, str) + return self + + def __hash__(self): + return hash(self.strpath) + + def __eq__(self, other): + s1 = str(self) + s2 = str(other) + if iswin32: + s1 = s1.lower() + s2 = s2.lower() + return s1 == s2 + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return str(self) < str(other) + + def remove(self, rec=1): + """ remove a file or directory (or a directory tree if rec=1). """ + if self.check(dir=1, link=0): + if rec: + # force remove of readonly files on windows + if iswin32: + self.chmod(448, rec=1) # octcal 0700 + py.error.checked_call(py.std.shutil.rmtree, self.strpath) + else: + py.error.checked_call(os.rmdir, self.strpath) + else: + if iswin32: + self.chmod(448) # octcal 0700 + py.error.checked_call(os.remove, self.strpath) + + def computehash(self, hashtype="md5", chunksize=524288): + """ return hexdigest of hashvalue for this file. 
""" + try: + try: + import hashlib as mod + except ImportError: + if hashtype == "sha1": + hashtype = "sha" + mod = __import__(hashtype) + hash = getattr(mod, hashtype)() + except (AttributeError, ImportError): + raise ValueError("Don't know how to compute %r hash" %(hashtype,)) + f = self.open('rb') + try: + while 1: + buf = f.read(chunksize) + if not buf: + return hash.hexdigest() + hash.update(buf) + finally: + f.close() + + def new(self, **kw): + """ create a modified version of this path. + the following keyword arguments modify various path parts: + + a:/some/path/to/a/file.ext + || drive + |-------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + obj = object.__new__(self.__class__) + drive, dirname, basename, purebasename,ext = self._getbyspec( + "drive,dirname,basename,purebasename,ext") + if 'basename' in kw: + if 'purebasename' in kw or 'ext' in kw: + raise ValueError("invalid specification %r" % kw) + else: + pb = kw.setdefault('purebasename', purebasename) + try: + ext = kw['ext'] + except KeyError: + pass + else: + if ext and not ext.startswith('.'): + ext = '.' + ext + kw['basename'] = pb + ext + + kw.setdefault('drive', drive) + kw.setdefault('dirname', dirname) + kw.setdefault('sep', self.sep) + obj.strpath = os.path.normpath( + "%(drive)s%(dirname)s%(sep)s%(basename)s" % kw) + return obj + + def _getbyspec(self, spec): + """ return a sequence of specified path parts. 'spec' is + a comma separated string containing path part names. + according to the following convention: + a:/some/path/to/a/file.ext + || drive + |-------------| dirname + |------| basename + |--| purebasename + |--| ext + """ + res = [] + parts = self.strpath.split(self.sep) + + args = filter(None, spec.split(',') ) + append = res.append + for name in args: + if name == 'drive': + append(parts[0]) + elif name == 'dirname': + append(self.sep.join(['']+parts[1:-1])) + else: + basename = parts[-1] + if name == 'basename': + append(basename) + else: + i = basename.rfind('.') + if i == -1: + purebasename, ext = basename, '' + else: + purebasename, ext = basename[:i], basename[i:] + if name == 'purebasename': + append(purebasename) + elif name == 'ext': + append(ext) + else: + raise ValueError("invalid part specification %r" % name) + return res + + def join(self, *args, **kwargs): + """ return a new path by appending all 'args' as path + components. if abs=1 is used restart from root if any + of the args is an absolute path. + """ + if not args: + return self + strpath = self.strpath + sep = self.sep + strargs = [str(x) for x in args] + if kwargs.get('abs', 0): + for i in range(len(strargs)-1, -1, -1): + if os.path.isabs(strargs[i]): + strpath = strargs[i] + strargs = strargs[i+1:] + break + for arg in strargs: + arg = arg.strip(sep) + if iswin32: + # allow unix style paths even on windows. + arg = arg.strip('/') + arg = arg.replace('/', sep) + if arg: + if not strpath.endswith(sep): + strpath += sep + strpath += arg + obj = self.new() + obj.strpath = os.path.normpath(strpath) + return obj + + def open(self, mode='r'): + """ return an opened file with the given mode. """ + return py.error.checked_call(open, self.strpath, mode) + + def listdir(self, fil=None, sort=None): + """ list directory contents, possibly filter by the given fil func + and possibly sorted. 
+ """ + if isinstance(fil, str): + fil = common.FNMatcher(fil) + res = [] + for name in py.error.checked_call(os.listdir, self.strpath): + childurl = self.join(name) + if fil is None or fil(childurl): + res.append(childurl) + self._sortlist(res, sort) + return res + + def size(self): + """ return size of the underlying file object """ + return self.stat().size + + def mtime(self): + """ return last modification time of the path. """ + return self.stat().mtime + + def copy(self, target, archive=False): + """ copy path to target.""" + assert not archive, "XXX archive-mode not supported" + if self.check(file=1): + if target.check(dir=1): + target = target.join(self.basename) + assert self!=target + copychunked(self, target) + else: + def rec(p): + return p.check(link=0) + for x in self.visit(rec=rec): + relpath = x.relto(self) + newx = target.join(relpath) + newx.dirpath().ensure(dir=1) + if x.check(link=1): + newx.mksymlinkto(x.readlink()) + elif x.check(file=1): + copychunked(x, newx) + elif x.check(dir=1): + newx.ensure(dir=1) + + def rename(self, target): + """ rename this path to target. """ + return py.error.checked_call(os.rename, str(self), str(target)) + + def dump(self, obj, bin=1): + """ pickle object into path location""" + f = self.open('wb') + try: + py.error.checked_call(py.std.pickle.dump, obj, f, bin) + finally: + f.close() + + def mkdir(self, *args): + """ create & return the directory joined with args. """ + p = self.join(*args) + py.error.checked_call(os.mkdir, str(p)) + return p + + def write(self, data, mode='w'): + """ write data into path. """ + if 'b' in mode: + if not py.builtin._isbytes(data): + raise ValueError("can only process bytes") + else: + if not py.builtin._istext(data): + if not py.builtin._isbytes(data): + data = str(data) + else: + data = py.builtin._totext(data, sys.getdefaultencoding()) + f = self.open(mode) + try: + f.write(data) + finally: + f.close() + + def _ensuredirs(self): + parent = self.dirpath() + if parent == self: + return self + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + try: + self.mkdir() + except py.error.EEXIST: + # race condition: file/dir created by another thread/process. + # complain if it is not a dir + if self.check(dir=0): + raise + return self + + def ensure(self, *args, **kwargs): + """ ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if kwargs.get('dir', 0): + return p._ensuredirs() + else: + p.dirpath()._ensuredirs() + if not p.check(file=1): + p.open('w').close() + return p + + def stat(self): + """ Return an os.stat() tuple. """ + return Stat(self, py.error.checked_call(os.stat, self.strpath)) + + def lstat(self): + """ Return an os.lstat() tuple. """ + return Stat(self, py.error.checked_call(os.lstat, self.strpath)) + + def setmtime(self, mtime=None): + """ set modification time for the given path. if 'mtime' is None + (the default) then the file's mtime is set to current time. + + Note that the resolution for 'mtime' is platform dependent. 
+ """ + if mtime is None: + return py.error.checked_call(os.utime, self.strpath, mtime) + try: + return py.error.checked_call(os.utime, self.strpath, (-1, mtime)) + except py.error.EINVAL: + return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) + + def chdir(self): + """ change directory to self and return old current directory """ + old = self.__class__() + py.error.checked_call(os.chdir, self.strpath) + return old + + def realpath(self): + """ return a new path which contains no symbolic links.""" + return self.__class__(os.path.realpath(self.strpath)) + + def atime(self): + """ return last access time of the path. """ + return self.stat().atime + + def __repr__(self): + return 'local(%r)' % self.strpath + + def __str__(self): + """ return string representation of the Path. """ + return self.strpath + + def pypkgpath(self, pkgname=None): + """ return the path's package path by looking for the given + pkgname. If pkgname is None then look for the last + directory upwards which still contains an __init__.py. + Return None if a pkgpath can not be determined. + """ + pkgpath = None + for parent in self.parts(reverse=True): + if pkgname is None: + if parent.check(file=1): + continue + if parent.join('__init__.py').check(): + pkgpath = parent + continue + return pkgpath + else: + if parent.basename == pkgname: + return parent + return pkgpath + + def _prependsyspath(self, path): + s = str(path) + if s != sys.path[0]: + #print "prepending to sys.path", s + sys.path.insert(0, s) + + def chmod(self, mode, rec=0): + """ change permissions to the given mode. If mode is an + integer it directly encodes the os-specific modes. + if rec is True perform recursively. + """ + if not isinstance(mode, int): + raise TypeError("mode %r must be an integer" % (mode,)) + if rec: + for x in self.visit(rec=rec): + py.error.checked_call(os.chmod, str(x), mode) + py.error.checked_call(os.chmod, str(self), mode) + + def pyimport(self, modname=None, ensuresyspath=True): + """ return path as an imported python module. + if modname is None, look for the containing package + and construct an according module name. + The module will be put/looked up in sys.modules. + """ + if not self.check(): + raise py.error.ENOENT(self) + #print "trying to import", self + pkgpath = None + if modname is None: + pkgpath = self.pypkgpath() + if pkgpath is not None: + if ensuresyspath: + self._prependsyspath(pkgpath.dirpath()) + pkg = __import__(pkgpath.basename, None, None, []) + names = self.new(ext='').relto(pkgpath.dirpath()) + names = names.split(self.sep) + modname = ".".join(names) + else: + # no package scope, still make it possible + if ensuresyspath: + self._prependsyspath(self.dirpath()) + modname = self.purebasename + mod = __import__(modname, None, None, ['__doc__']) + modfile = mod.__file__ + if modfile[-4:] in ('.pyc', '.pyo'): + modfile = modfile[:-1] + elif modfile.endswith('$py.class'): + modfile = modfile[:-9] + '.py' + if not self.samefile(modfile): + raise EnvironmentError("mismatch:\n" + "imported module %r\n" + "does not stem from %r\n" + "maybe __init__.py files are missing?" 
% (mod, str(self))) + return mod + else: + try: + return sys.modules[modname] + except KeyError: + # we have a custom modname, do a pseudo-import + mod = py.std.types.ModuleType(modname) + mod.__file__ = str(self) + sys.modules[modname] = mod + try: + py.builtin.execfile(str(self), mod.__dict__) + except: + del sys.modules[modname] + raise + return mod + + def sysexec(self, *argv, **popen_opts): + """ return stdout text from executing a system child process, + where the 'self' path points to executable. + The process is directly invoked and not through a system shell. + """ + from subprocess import Popen, PIPE + argv = map(str, argv) + popen_opts['stdout'] = popen_opts['stderr'] = PIPE + proc = Popen([str(self)] + list(argv), **popen_opts) + stdout, stderr = proc.communicate() + ret = proc.wait() + if py.builtin._isbytes(stdout): + stdout = py.builtin._totext(stdout, sys.getdefaultencoding()) + if ret != 0: + if py.builtin._isbytes(stderr): + stderr = py.builtin._totext(stderr, sys.getdefaultencoding()) + raise py.process.cmdexec.Error(ret, ret, str(self), + stdout, stderr,) + return stdout + + def sysfind(cls, name, checker=None): + """ return a path object found by looking at the systems + underlying PATH specification. If the checker is not None + it will be invoked to filter matching paths. If a binary + cannot be found, None is returned + Note: This is probably not working on plain win32 systems + but may work on cygwin. + """ + if os.path.isabs(name): + p = py.path.local(name) + if p.check(file=1): + return p + else: + if iswin32: + paths = py.std.os.environ['Path'].split(';') + if '' not in paths and '.' not in paths: + paths.append('.') + try: + systemroot = os.environ['SYSTEMROOT'] + except KeyError: + pass + else: + paths = [re.sub('%SystemRoot%', systemroot, path) + for path in paths] + tryadd = '', '.exe', '.com', '.bat' # XXX add more? + else: + paths = py.std.os.environ['PATH'].split(':') + tryadd = ('',) + + for x in paths: + for addext in tryadd: + p = py.path.local(x).join(name, abs=True) + addext + try: + if p.check(file=1): + if checker: + if not checker(p): + continue + return p + except py.error.EACCES: + pass + return None + sysfind = classmethod(sysfind) + + def _gethomedir(cls): + try: + x = os.environ['HOME'] + except KeyError: + x = os.environ['HOMEPATH'] + return cls(x) + _gethomedir = classmethod(_gethomedir) + + #""" + #special class constructors for local filesystem paths + #""" + def get_temproot(cls): + """ return the system's temporary directory + (where tempfiles are usually created in) + """ + return py.path.local(py.std.tempfile.gettempdir()) + get_temproot = classmethod(get_temproot) + + def mkdtemp(cls): + """ return a Path object pointing to a fresh new temporary directory + (which we created ourself). + """ + import tempfile + tries = 10 + for i in range(tries): + dname = tempfile.mktemp() + dpath = cls(tempfile.mktemp()) + try: + dpath.mkdir() + except (py.error.EEXIST, py.error.EPERM, py.error.EACCES): + continue + return dpath + raise py.error.ENOENT(dpath, "could not create tempdir, %d tries" % tries) + mkdtemp = classmethod(mkdtemp) + + def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, + lock_timeout = 172800): # two days + """ return unique directory with a number greater than the current + maximum one. The number is assumed to start directly after prefix. + if keep is true directories with a number less than (maxnum-keep) + will be removed. 
+ """ + if rootdir is None: + rootdir = cls.get_temproot() + + def parse_num(path): + """ parse the number out of a path (if it matches the prefix) """ + bn = path.basename + if bn.startswith(prefix): + try: + return int(bn[len(prefix):]) + except ValueError: + pass + + # compute the maximum number currently in use with the + # prefix + lastmax = None + while True: + maxnum = -1 + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None: + maxnum = max(maxnum, num) + + # make the new directory + try: + udir = rootdir.mkdir(prefix + str(maxnum+1)) + except py.error.EEXIST: + # race condition: another thread/process created the dir + # in the meantime. Try counting again + if lastmax == maxnum: + raise + lastmax = maxnum + continue + break + + # put a .lock file in the new directory that will be removed at + # process exit + if lock_timeout: + lockfile = udir.join('.lock') + mypid = os.getpid() + if hasattr(lockfile, 'mksymlinkto'): + lockfile.mksymlinkto(str(mypid)) + else: + lockfile.write(str(mypid)) + def try_remove_lockfile(): + # in a fork() situation, only the last process should + # remove the .lock, otherwise the other processes run the + # risk of seeing their temporary dir disappear. For now + # we remove the .lock in the parent only (i.e. we assume + # that the children finish before the parent). + if os.getpid() != mypid: + return + try: + lockfile.remove() + except py.error.Error: + pass + atexit.register(try_remove_lockfile) + + # prune old directories + if keep: + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None and num <= (maxnum - keep): + lf = path.join('.lock') + try: + t1 = lf.lstat().mtime + t2 = lockfile.lstat().mtime + if not lock_timeout or abs(t2-t1) < lock_timeout: + continue # skip directories still locked + except py.error.Error: + pass # assume that it means that there is no 'lf' + try: + path.remove(rec=1) + except KeyboardInterrupt: + raise + except: # this might be py.error.Error, WindowsError ... + pass + + # make link... + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = 'current' + + src = str(udir) + dest = src[:src.rfind('-')] + '-' + username + try: + os.unlink(dest) + except OSError: + pass + try: + os.symlink(src, dest) + except (OSError, AttributeError): # AttributeError on win32 + pass + + return udir + make_numbered_dir = classmethod(make_numbered_dir) + +def copychunked(src, dest): + chunksize = 524288 # half a meg of bytes + fsrc = src.open('rb') + try: + fdest = dest.open('wb') + try: + while 1: + buf = fsrc.read(chunksize) + if not buf: + break + fdest.write(buf) + finally: + fdest.close() + finally: + fsrc.close() + +def autopath(globs=None): + """ (deprecated) return the (local) path of the "current" file pointed to by globals or - if it is none - alternatively the callers frame globals. + + the path will always point to a .py file or to None. 
+ the path will have the following payload: + pkgdir is the last parent directory path containing __init__.py + """ + py.log._apiwarn("1.1", "py.magic.autopath deprecated, " + "use py.path.local(__file__) and maybe pypkgpath/pyimport().") + if globs is None: + globs = sys._getframe(1).f_globals + try: + __file__ = globs['__file__'] + except KeyError: + if not sys.argv[0]: + raise ValueError("cannot compute autopath in interactive mode") + __file__ = os.path.abspath(sys.argv[0]) + + ret = py.path.local(__file__) + if ret.ext in ('.pyc', '.pyo'): + ret = ret.new(ext='.py') + current = pkgdir = ret.dirpath() + while 1: + if current.join('__init__.py').check(): + pkgdir = current + current = current.dirpath() + if pkgdir != current: + continue + elif str(current) not in sys.path: + sys.path.insert(0, str(current)) + break + ret.pkgdir = pkgdir + return ret + --- /dev/null +++ b/py/__metainfo.py @@ -0,0 +1,2 @@ +import py +pydir = py.path.local(py.__file__).dirpath() --- a/doc/test/plugin/xdist.txt +++ b/doc/test/plugin/xdist.txt @@ -26,6 +26,7 @@ program source code to the remote place. are reported back and displayed to your local test session. You may specify different Python versions and interpreters. +.. _`pytest-xdist`: http://pytest.org/plugin/xdist.html Usage examples --------------------- --- /dev/null +++ b/py/_path/gateway/channeltest2.py @@ -0,0 +1,21 @@ +import py +from remotepath import RemotePath + + +SRC = open('channeltest.py', 'r').read() + +SRC += ''' +import py +srv = PathServer(channel.receive()) +channel.send(srv.p2c(py.path.local("/tmp"))) +''' + + +#gw = execnet.SshGateway('codespeak.net') +gw = execnet.PopenGateway() +gw.remote_init_threads(5) +c = gw.remote_exec(SRC, stdout=py.std.sys.stdout, stderr=py.std.sys.stderr) +subchannel = gw._channelfactory.new() +c.send(subchannel) + +p = RemotePath(subchannel, c.receive()) --- /dev/null +++ b/py/_cmdline/pytest.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python +import py + +def main(args): + py.test.cmdline.main(args) --- /dev/null +++ b/py/_plugin/__init__.py @@ -0,0 +1,1 @@ +# From commits-noreply at bitbucket.org Wed Jan 13 18:07:14 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 13 Jan 2010 17:07:14 +0000 (UTC) Subject: [py-svn] py-trunk commit 196d3c7e0809: reduce usage of the global py.test.config which maybe should die or become less global at some point (along with py.test.ensuretemp) Message-ID: <20100113170714.9890B7EE8A@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263402298 -3600 # Node ID 196d3c7e080933d864d03eec31ef0fb8837f742b # Parent 92022b94ebd9777ecbf318bc77a1432e91f62ea0 reduce usage of the global py.test.config which maybe should die or become less global at some point (along with py.test.ensuretemp) --- a/ISSUES.txt +++ b/ISSUES.txt @@ -51,14 +51,23 @@ but a remote one fail because the tests does not contain an "__init__.py". Either give an error or make it work without the __init__.py -deprecate ensuretemp / introduce funcargs to setup method +consider globals: py.test.ensuretemp and config -------------------------------------------------------------- tags: experimental-wish 1.2 -The remaining uses of py.test.ensuretemp within the py-test base -itself are for setup methods. Also users have expressed the -wish to have funcargs available to setup functions. Experiment -with allowing funcargs there and finalizing deprecating py.test.ensuretemp. 
+consider deprecating py.test.ensuretemp and py.test.config +to further reduce py.test globality. Also consider +having py.test.config and ensuretemp coming from +a plugin rather than being there from the start. + +consider allowing funcargs to setup methods +-------------------------------------------------------------- +tags: experimental-wish 1.2 + +Users have expressed the wish to have funcargs available to setup +functions. Experiment with allowing funcargs there - it might +also help to make the py.test.ensuretemp and config deprecation. + outsource figleaf plugin --------------------------------------- --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -19,8 +19,8 @@ class TestGeneralUsage: """) testdir.makepyfile(test_one=""" import py - def test_option(): - assert py.test.config.option.xyz == "123" + def test_option(pytestconfig): + assert pytestconfig.option.xyz == "123" """) result = testdir.runpytest("-p", "xyz", "--xyz=123") assert result.ret == 0 --- a/testing/io_/test_terminalwriter.py +++ b/testing/io_/test_terminalwriter.py @@ -44,46 +44,69 @@ def test_unicode_encoding(): tw.line(msg) assert l[0].strip() == msg.encode(encoding) -class BaseTests: - def test_line(self): - tw = self.getwriter() +class TestTerminalWriter: + def pytest_generate_tests(self, metafunc): + if "tw" in metafunc.funcargnames: + metafunc.addcall(id="path", param="path") + metafunc.addcall(id="stringio", param="stringio") + metafunc.addcall(id="callable", param="callable") + def pytest_funcarg__tw(self, request): + if request.param == "path": + tmpdir = request.getfuncargvalue("tmpdir") + p = tmpdir.join("tmpfile") + tw = py.io.TerminalWriter(p.open('w+')) + def getlines(): + tw._file.flush() + return p.open('r').readlines() + elif request.param == "stringio": + tw = py.io.TerminalWriter(stringio=True) + def getlines(): + tw.stringio.seek(0) + return tw.stringio.readlines() + elif request.param == "callable": + writes = [] + tw = py.io.TerminalWriter(writes.append) + def getlines(): + io = py.io.TextIO() + io.write("".join(writes)) + io.seek(0) + return io.readlines() + tw.getlines = getlines + return tw + + def test_line(self, tw): tw.line("hello") - l = self.getlines() + l = tw.getlines() assert len(l) == 1 assert l[0] == "hello\n" - def test_line_unicode(self): - tw = self.getwriter() + def test_line_unicode(self, tw): for encoding in 'utf8', 'latin1': tw._encoding = encoding msg = py.builtin._totext('b\u00f6y', 'utf8') tw.line(msg) - l = self.getlines() + l = tw.getlines() assert l[0] == msg + "\n" - def test_sep_no_title(self): - tw = self.getwriter() + def test_sep_no_title(self, tw): tw.sep("-", fullwidth=60) - l = self.getlines() + l = tw.getlines() assert len(l) == 1 assert l[0] == "-" * 60 + "\n" - def test_sep_with_title(self): - tw = self.getwriter() + def test_sep_with_title(self, tw): tw.sep("-", "hello", fullwidth=60) - l = self.getlines() + l = tw.getlines() assert len(l) == 1 assert l[0] == "-" * 26 + " hello " + "-" * 27 + "\n" @py.test.mark.skipif("sys.platform == 'win32'") - def test__escaped(self): - tw = self.getwriter() + def test__escaped(self, tw): text2 = tw._escaped("hello", (31)) assert text2.find("hello") != -1 @py.test.mark.skipif("sys.platform == 'win32'") - def test_markup(self): - tw = self.getwriter() + def test_markup(self, tw): for bold in (True, False): for color in ("red", "green"): text2 = tw.markup("hello", **{color: True, 'bold': bold}) @@ -91,53 +114,22 @@ class BaseTests: py.test.raises(ValueError, "tw.markup('x', wronkw=3)") 
py.test.raises(ValueError, "tw.markup('x', wronkw=0)") - def test_line_write_markup(self): - tw = self.getwriter() + def test_line_write_markup(self, tw): tw.hasmarkup = True tw.line("x", bold=True) tw.write("x\n", red=True) - l = self.getlines() + l = tw.getlines() if sys.platform != "win32": assert len(l[0]) > 2, l assert len(l[1]) > 2, l - def test_attr_fullwidth(self): - tw = self.getwriter() + def test_attr_fullwidth(self, tw): tw.sep("-", "hello", fullwidth=70) tw.fullwidth = 70 tw.sep("-", "hello") - l = self.getlines() + l = tw.getlines() assert len(l[0]) == len(l[1]) -class TestTmpfile(BaseTests): - def getwriter(self): - self.path = py.test.config.ensuretemp("terminalwriter").ensure("tmpfile") - self.tw = py.io.TerminalWriter(self.path.open('w+')) - return self.tw - def getlines(self): - io = self.tw._file - io.flush() - return self.path.open('r').readlines() - -class TestWithStringIO(BaseTests): - def getwriter(self): - self.tw = py.io.TerminalWriter(stringio=True) - return self.tw - def getlines(self): - io = self.tw.stringio - io.seek(0) - return io.readlines() - -class TestCallableFile(BaseTests): - def getwriter(self): - self.writes = [] - return py.io.TerminalWriter(self.writes.append) - - def getlines(self): - io = py.io.TextIO() - io.write("".join(self.writes)) - io.seek(0) - return io.readlines() def test_attr_hasmarkup(): tw = py.io.TerminalWriter(stringio=True) @@ -146,6 +138,3 @@ def test_attr_hasmarkup(): tw.line("hello", bold=True) s = tw.stringio.getvalue() assert len(s) > len("hello") - - - --- a/testing/test_funcargs.py +++ b/testing/test_funcargs.py @@ -391,15 +391,15 @@ class TestGenfuncFunctional: assert request._pyfuncitem._genid == "0" return request.param - def test_function(metafunc): - assert metafunc.config == py.test.config + def test_function(metafunc, pytestconfig): + assert metafunc.config == pytestconfig assert metafunc.module.__name__ == __name__ assert metafunc.function == test_function assert metafunc.cls is None class TestClass: - def test_method(self, metafunc): - assert metafunc.config == py.test.config + def test_method(self, metafunc, pytestconfig): + assert metafunc.config == pytestconfig assert metafunc.module.__name__ == __name__ if py.std.sys.version_info > (3, 0): unbound = TestClass.test_method --- a/testing/test_pluginmanager.py +++ b/testing/test_pluginmanager.py @@ -74,8 +74,8 @@ class TestBootstrapping: x500 = testdir.makepyfile(pytest_x500="#") p = testdir.makepyfile(""" import py - def test_hello(): - plugin = py.test.config.pluginmanager.getplugin('x500') + def test_hello(pytestconfig): + plugin = pytestconfig.pluginmanager.getplugin('x500') assert plugin is not None """) monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",") From commits-noreply at bitbucket.org Wed Jan 13 18:17:40 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 13 Jan 2010 17:17:40 +0000 (UTC) Subject: [py-svn] py-trunk commit 1ebedf9c2a20: some issue soritng related to the 1.2 series Message-ID: <20100113171740.716A47EE75@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263403044 -3600 # Node ID 1ebedf9c2a2053cfd53c5671758120e78bfafbf6 # Parent 196d3c7e080933d864d03eec31ef0fb8837f742b some issue soritng related to the 1.2 series --- a/ISSUES.txt +++ b/ISSUES.txt @@ -41,16 +41,6 @@ tags: feature there is no way to induce py.test to display the full tracebacks of the expected failure. Introduce one. 
-relax requirement to have tests/testing contain an __init__ ----------------------------------------------------------------- -tags: feature 1.2 -bb: http://bitbucket.org/hpk42/py-trunk/issue/64 - -A local test run of a "tests" directory may work -but a remote one fail because the tests directory -does not contain an "__init__.py". Either give -an error or make it work without the __init__.py - consider globals: py.test.ensuretemp and config -------------------------------------------------------------- tags: experimental-wish 1.2 @@ -68,15 +58,6 @@ Users have expressed the wish to have fu functions. Experiment with allowing funcargs there - it might also help to make the py.test.ensuretemp and config deprecation. - -outsource figleaf plugin ---------------------------------------- -tags: 1.2 - -Packages with external dependencies should be moved out -of the core distribution. Also figleaf could serve as -another prototype for an external plugin. - consider pytest_addsyspath hook ----------------------------------------- tags: 1.2 @@ -87,3 +68,32 @@ in order to more easily run against inst Alternatively it could also be done via the config object and pytest_configure. + +relax requirement to have tests/testing contain an __init__ +---------------------------------------------------------------- +tags: feature 1.2 +bb: http://bitbucket.org/hpk42/py-trunk/issue/64 + +A local test run of a "tests" directory may work +but a remote one fail because the tests directory +does not contain an "__init__.py". Either give +an error or make it work without the __init__.py + + +show plugin information in test header +---------------------------------------------------------------- +tags: feature 1.2 + +Now that external plugins are becoming more numerous +it would be useful to have external plugins along with +their versions displayed as a header line. + +generate/deal with plugin docs +---------------------------------------------------------------- +tags: feature 1.2 + +review and prepare docs for 1.2.0 release. Probably +have docs living with the plugin and require them to +be available on doc generation time, at least when +the target is the website? Or rather go for interactive help? + From commits-noreply at bitbucket.org Wed Jan 13 18:20:53 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 13 Jan 2010 17:20:53 +0000 (UTC) Subject: [py-svn] py-trunk commit 88f7df8ab4fb: another wish Message-ID: <20100113172053.9408E7EE76@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263403238 -3600 # Node ID 88f7df8ab4fbc2f5b328a16fbf106fbe297d871d # Parent 1ebedf9c2a2053cfd53c5671758120e78bfafbf6 another wish --- a/ISSUES.txt +++ b/ISSUES.txt @@ -97,3 +97,13 @@ have docs living with the plugin and req be available on doc generation time, at least when the target is the website? Or rather go for interactive help? +improved reporting on funcarg usage / name mismatches +---------------------------------------------------------------- +tags: feature 1.2 + +see to improve help and support for funcarg usage, +i.e. when a funcarg does not match any provided one. +Also consider implementing py.test --funcargs to +show available funcargs - it should honour the +path::TestClass syntax so one can easily inspect +where funcargs come from or which are available. 
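
The "funcarg" machinery referred to in the ISSUES entries above is driven
purely by argument names; a minimal sketch of how a factory and a consuming
test fit together (the name "myarg" and its value are made up for
illustration):

    # conftest.py -- registers a factory through its pytest_funcarg__ name
    def pytest_funcarg__myarg(request):
        # "request" describes the test function asking for "myarg"
        return 42

    # test_example.py -- a test receives the value simply by naming the argument
    def test_uses_myarg(myarg):
        assert myarg == 42

A test argument that matches no pytest_funcarg__<name> factory fails during
setup; the last entry above asks for friendlier reporting in exactly that
case, plus a "--funcargs" overview of what is available and where it comes from.
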
From commits-noreply at bitbucket.org Fri Jan 15 17:52:34 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 15 Jan 2010 16:52:34 +0000 (UTC) Subject: [py-svn] py-trunk commit ad9c92b6aca6: remove superflous building of a dict, preserve order for nodes that have identical file:lineno Message-ID: <20100115165234.62F387EEE5@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263573489 -3600 # Node ID ad9c92b6aca6623367a68936c37e30b70b077178 # Parent 88f7df8ab4fbc2f5b328a16fbf106fbe297d871d remove superflous building of a dict, preserve order for nodes that have identical file:lineno --- a/py/_test/pycollect.py +++ b/py/_test/pycollect.py @@ -76,18 +76,12 @@ class PyCollectorMixin(PyobjMixin, py.te l = self._deprecated_collect() if l is not None: return l - name2items = self._buildname2items() - colitems = list(name2items.values()) - colitems.sort(key=lambda item: item.reportinfo()[:2]) - return colitems - - def _buildname2items(self): # NB. we avoid random getattrs and peek in the __dict__ instead - d = {} dicts = [getattr(self.obj, '__dict__', {})] for basecls in inspect.getmro(self.obj.__class__): dicts.append(basecls.__dict__) seen = {} + l = [] for dic in dicts: for name, obj in dic.items(): if name in seen: @@ -96,8 +90,9 @@ class PyCollectorMixin(PyobjMixin, py.te if name[0] != "_": res = self.makeitem(name, obj) if res is not None: - d[name] = res - return d + l.append(res) + l.sort(key=lambda item: item.reportinfo()[:2]) + return l def _deprecated_join(self, name): if self.__class__.join != py.test.collect.Collector.join: --- a/testing/test_pycollect.py +++ b/testing/test_pycollect.py @@ -333,7 +333,7 @@ class TestConftestCustomization: l = [] monkeypatch.setattr(py.test.collect.Module, 'makeitem', lambda self, name, obj: l.append(name)) - modcol._buildname2items() + l = modcol.collect() assert '_hello' not in l From commits-noreply at bitbucket.org Fri Jan 15 17:52:36 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 15 Jan 2010 16:52:36 +0000 (UTC) Subject: [py-svn] py-trunk commit 1464edd0ddb6: get rid of the funccollector node, which nice-ifies names of funcarg-generated tests nodes, also test and fix one anomaly wrt to funcarg setups and instance uniqueness Message-ID: <20100115165236.573E97EEDE@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263574202 -3600 # Node ID 1464edd0ddb6477d14537505db642e80e1ceb1a2 # Parent ad9c92b6aca6623367a68936c37e30b70b077178 get rid of the funccollector node, which nice-ifies names of funcarg-generated tests nodes, also test and fix one anomaly wrt to funcarg setups and instance uniqueness --- a/py/_test/funcargs.py +++ b/py/_test/funcargs.py @@ -60,31 +60,6 @@ class Metafunc: self._ids.add(id) self._calls.append(CallSpec(funcargs, id, param)) -class FunctionCollector(py.test.collect.Collector): - def __init__(self, name, parent, calls): - super(FunctionCollector, self).__init__(name, parent) - self.calls = calls - self.obj = getattr(self.parent.obj, name) - - def collect(self): - l = [] - for callspec in self.calls: - name = "%s[%s]" %(self.name, callspec.id) - function = self.parent.Function(name=name, parent=self, - callspec=callspec, callobj=self.obj) - l.append(function) - return l - - def reportinfo(self): - try: - return self._fslineno, self.name - except 
AttributeError: - pass - fspath, lineno = py.code.getfslineno(self.obj) - self._fslineno = fspath, lineno - return fspath, lineno, self.name - - class FuncargRequest: _argprefix = "pytest_funcarg__" _argname = None --- a/py/_test/pycollect.py +++ b/py/_test/pycollect.py @@ -89,8 +89,11 @@ class PyCollectorMixin(PyobjMixin, py.te seen[name] = True if name[0] != "_": res = self.makeitem(name, obj) - if res is not None: - l.append(res) + if res is None: + continue + if not isinstance(res, list): + res = [res] + l.extend(res) l.sort(key=lambda item: item.reportinfo()[:2]) return l @@ -122,9 +125,13 @@ class PyCollectorMixin(PyobjMixin, py.te gentesthook.pcall(plugins, metafunc=metafunc) if not metafunc._calls: return self.Function(name, parent=self) - return funcargs.FunctionCollector(name=name, - parent=self, calls=metafunc._calls) - + l = [] + for callspec in metafunc._calls: + subname = "%s[%s]" %(name, callspec.id) + function = self.Function(name=subname, parent=self, + callspec=callspec, callobj=funcobj) + l.append(function) + return l class Module(py.test.collect.File, PyCollectorMixin): def _getobj(self): @@ -313,6 +320,13 @@ class Function(FunctionMixin, py.test.co self._obj = callobj self.function = getattr(self.obj, 'im_func', self.obj) + def _getobj(self): + name = self.name + i = name.find("[") # parametrization + if i != -1: + name = name[:i] + return getattr(self.parent.obj, name) + def _isyieldedfunction(self): return self._args is not None --- a/doc/example/assertion/test_failures.py +++ b/doc/example/assertion/test_failures.py @@ -11,4 +11,4 @@ def test_failure_demo_fails_properly(tes assert failed == 20, failed colreports = reprec.getreports("pytest_collectreport") failed = len([x.failed for x in colreports]) - assert failed == 4 + assert failed == 3 --- a/testing/test_funcargs.py +++ b/testing/test_funcargs.py @@ -498,6 +498,24 @@ class TestGenfuncFunctional: "*1 passed*" ]) + def test_two_functions_not_same_instance(self, testdir): + p = testdir.makepyfile(""" + def pytest_generate_tests(metafunc): + metafunc.addcall({'arg1': 10}) + metafunc.addcall({'arg1': 20}) + + class TestClass: + def test_func(self, arg1): + assert not hasattr(self, 'x') + self.x = 1 + """) + result = testdir.runpytest("-v", p) + assert result.stdout.fnmatch_lines([ + "*test_func*0*PASS*", + "*test_func*1*PASS*", + "*2 pass*", + ]) + def test_conftest_funcargs_only_available_in_subdir(testdir): sub1 = testdir.mkpydir("sub1") From commits-noreply at bitbucket.org Fri Jan 15 18:45:27 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Fri, 15 Jan 2010 17:45:27 +0000 (UTC) Subject: [py-svn] py-trunk commit cd8564b53dee: fix test_importall to not stop on skipped plugins and fix the uncovered failure of genscript: standalone.py template is now safely importable Message-ID: <20100115174527.AA2477EEE5@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263577506 -3600 # Node ID cd8564b53dee0a6eada8e73f9cd404d05c6c102b # Parent 1464edd0ddb6477d14537505db642e80e1ceb1a2 fix test_importall to not stop on skipped plugins and fix the uncovered failure of genscript: standalone.py template is now safely importable --- a/py/_plugin/standalonetemplate.py +++ b/py/_plugin/standalonetemplate.py @@ -8,18 +8,10 @@ import base64 import zlib import imp -if sys.version_info >= (3,0): - exec("def do_exec(co, loc): exec(co, loc)\n") - import pickle - sources = sources.encode("ascii") # ensure 
bytes - sources = pickle.loads(zlib.decompress(base64.decodebytes(sources))) -else: - import cPickle as pickle - exec("def do_exec(co, loc): exec co in loc\n") - sources = pickle.loads(zlib.decompress(base64.decodestring(sources))) +class DictImporter(object): + def __init__(self, sources): + self.sources = sources -class DictImporter(object): - sources = sources def find_module(self, fullname, path=None): if fullname in self.sources: return self @@ -53,12 +45,19 @@ class DictImporter(object): res = self.sources.get(name+'.__init__') return res +if __name__ == "__main__": + if sys.version_info >= (3,0): + exec("def do_exec(co, loc): exec(co, loc)\n") + import pickle + sources = sources.encode("ascii") # ensure bytes + sources = pickle.loads(zlib.decompress(base64.decodebytes(sources))) + else: + import cPickle as pickle + exec("def do_exec(co, loc): exec co in loc\n") + sources = pickle.loads(zlib.decompress(base64.decodestring(sources))) + importer = DictImporter(sources) + sys.meta_path.append(importer) -importer = DictImporter() - -sys.meta_path.append(importer) - -if __name__ == "__main__": import py py.cmdline.pytest() --- a/testing/root/test_py_imports.py +++ b/testing/root/test_py_imports.py @@ -1,6 +1,7 @@ import py import types import sys +from py._test.outcome import Skipped def checksubpackage(name): obj = getattr(py, name) @@ -29,7 +30,6 @@ def test_importall(): nodirs = [ base.join('_path', 'gateway',), base.join('_code', 'oldmagic.py'), - base.join('_compat', 'testing'), ] if sys.version_info >= (3,0): nodirs.append(base.join('_code', '_assertionold.py')) @@ -50,7 +50,10 @@ def test_importall(): else: relpath = relpath.replace(base.sep, '.') modpath = 'py.%s' % relpath - check_import(modpath) + try: + check_import(modpath) + except Skipped: + pass def check_import(modpath): py.builtin.print_("checking import", modpath) From commits-noreply at bitbucket.org Sat Jan 16 19:41:25 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 16 Jan 2010 18:41:25 +0000 (UTC) Subject: [py-svn] py-trunk commit e0736a9c6877: kill unused code Message-ID: <20100116184125.CF8397EEE5@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263667265 -3600 # Node ID e0736a9c6877d61b2eacdc92ce437a8795f78345 # Parent cd8564b53dee0a6eada8e73f9cd404d05c6c102b kill unused code --- a/py/_test/compat.py +++ /dev/null @@ -1,58 +0,0 @@ -import py - -from py.test.collect import Function - -class TestCaseUnit(Function): - """ compatibility Unit executor for TestCase methods - honouring setUp and tearDown semantics. - """ - def runtest(self, _deprecated=None): - boundmethod = self.obj - instance = py.builtin._getimself(boundmethod) - instance.setUp() - try: - boundmethod() - finally: - instance.tearDown() - -class TestCase(object): - """compatibility class of unittest's TestCase. """ - Function = TestCaseUnit - - def setUp(self): - pass - - def tearDown(self): - pass - - def fail(self, msg=None): - """ fail immediate with given message. 
""" - py.test.fail(msg) - - def assertRaises(self, excclass, func, *args, **kwargs): - py.test.raises(excclass, func, *args, **kwargs) - failUnlessRaises = assertRaises - - # dynamically construct (redundant) methods - aliasmap = [ - ('x', 'not x', 'assert_, failUnless'), - ('x', 'x', 'failIf'), - ('x,y', 'x!=y', 'failUnlessEqual,assertEqual, assertEquals'), - ('x,y', 'x==y', 'failIfEqual,assertNotEqual, assertNotEquals'), - ] - items = [] - for sig, expr, names in aliasmap: - names = map(str.strip, names.split(',')) - sigsubst = expr.replace('y', '%s').replace('x', '%s') - for name in names: - items.append(""" - def %(name)s(self, %(sig)s, msg=""): - __tracebackhide__ = True - if %(expr)s: - py.test.fail(msg=msg + (%(sigsubst)r %% (%(sig)s))) - """ % locals() ) - - source = "".join(items) - exec(py.code.Source(source).compile()) - -__all__ = ['TestCase'] --- a/testing/test_compat.py +++ /dev/null @@ -1,53 +0,0 @@ -from __future__ import generators -import py -from py._test.compat import TestCase -from py._test.outcome import Failed - -class TestCompatTestCaseSetupSemantics(TestCase): - globlist = [] - - def setUp(self): - self.__dict__.setdefault('l', []).append(42) - self.globlist.append(self) - - def tearDown(self): - self.l.pop() - - def test_issetup(self): - l = self.l - assert len(l) == 1 - assert l[-1] == 42 - #self.checkmultipleinstances() - - def test_issetup2(self): - l = self.l - assert len(l) == 1 - assert l[-1] == 42 - #self.checkmultipleinstances() - - #def checkmultipleinstances(self): - # for x,y in zip(self.globlist, self.globlist[1:]): - # assert x is not y - -class TestCompatAssertions(TestCase): - nameparamdef = { - 'failUnlessEqual,assertEqual,assertEquals': ('1, 1', '1, 0'), - 'assertNotEquals,failIfEqual': ('0, 1', '0,0'), - 'failUnless,assert_': ('1', 'None'), - 'failIf': ('0', '1'), - } - - sourcelist = [] - for names, (paramok, paramfail) in nameparamdef.items(): - for name in names.split(','): - source = """ - def test_%(name)s(self): - self.%(name)s(%(paramok)s) - #self.%(name)s(%(paramfail)s) - - def test_%(name)s_failing(self): - self.assertRaises(Failed, - self.%(name)s, %(paramfail)s) - """ % locals() - co = py.code.Source(source).compile() - exec(co) From commits-noreply at bitbucket.org Sat Jan 16 23:34:05 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sat, 16 Jan 2010 22:34:05 +0000 (UTC) Subject: [py-svn] py-trunk commit e693d93b84de: rename logxml plugin to junitxml Message-ID: <20100116223405.920FA7EEDE@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263681206 -3600 # Node ID e693d93b84def64b7c8806fe6fcb53389a5878de # Parent e0736a9c6877d61b2eacdc92ce437a8795f78345 rename logxml plugin to junitxml --- a/py/_plugin/pytest_logxml.py +++ /dev/null @@ -1,156 +0,0 @@ -""" - logging of test results in JUnit-XML format, for use with Hudson - and build integration servers. Based on initial code from Ross Lawley. 
-""" - -import py -import time - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting") - group.addoption('--xml', action="store", dest="xmlpath", - metavar="path", default=None, - help="create junit-xml style report file at the given path.") - -def pytest_configure(config): - xmlpath = config.option.xmlpath - if xmlpath: - config._xml = LogXML(xmlpath) - config.pluginmanager.register(config._xml) - -def pytest_unconfigure(config): - xml = getattr(config, '_xml', None) - if xml: - del config._xml - config.pluginmanager.unregister(xml) - -class LogXML(object): - def __init__(self, logfile): - self.logfile = logfile - self.test_logs = [] - self.passed = self.skipped = 0 - self.failed = self.errors = 0 - self._durations = {} - - def _opentestcase(self, report): - node = report.item - d = {'time': self._durations.pop(report.item, "0")} - names = [x.replace(".py", "") for x in node.listnames() if x != "()"] - d['classname'] = ".".join(names[:-1]) - d['name'] = names[-1] - attrs = ['%s="%s"' % item for item in sorted(d.items())] - self.test_logs.append("\n" % " ".join(attrs)) - - def _closetestcase(self): - self.test_logs.append("") - - def append_pass(self, report): - self.passed += 1 - self._opentestcase(report) - self._closetestcase() - - def append_failure(self, report): - self._opentestcase(report) - s = py.xml.escape(str(report.longrepr)) - #msg = str(report.longrepr.reprtraceback.extraline) - self.test_logs.append( - '%s' % (s)) - self._closetestcase() - self.failed += 1 - - def _opentestcase_collectfailure(self, report): - node = report.collector - d = {'time': '???'} - names = [x.replace(".py", "") for x in node.listnames() if x != "()"] - d['classname'] = ".".join(names[:-1]) - d['name'] = names[-1] - attrs = ['%s="%s"' % item for item in sorted(d.items())] - self.test_logs.append("\n" % " ".join(attrs)) - - def append_collect_failure(self, report): - self._opentestcase_collectfailure(report) - s = py.xml.escape(str(report.longrepr)) - #msg = str(report.longrepr.reprtraceback.extraline) - self.test_logs.append( - '%s' % (s)) - self._closetestcase() - self.errors += 1 - - def append_collect_skipped(self, report): - self._opentestcase_collectfailure(report) - s = py.xml.escape(str(report.longrepr)) - #msg = str(report.longrepr.reprtraceback.extraline) - self.test_logs.append( - '%s' % (s)) - self._closetestcase() - self.skipped += 1 - - def append_error(self, report): - self._opentestcase(report) - s = py.xml.escape(str(report.longrepr)) - self.test_logs.append( - '%s' % s) - self._closetestcase() - self.errors += 1 - - def append_skipped(self, report): - self._opentestcase(report) - self.test_logs.append("") - self._closetestcase() - self.skipped += 1 - - def pytest_runtest_logreport(self, report): - if report.passed: - self.append_pass(report) - elif report.failed: - if report.when != "call": - self.append_error(report) - else: - self.append_failure(report) - elif report.skipped: - self.append_skipped(report) - - def pytest_runtest_call(self, item, __multicall__): - start = time.time() - try: - return __multicall__.execute() - finally: - self._durations[item] = time.time() - start - - def pytest_collectreport(self, report): - if not report.passed: - if report.failed: - self.append_collect_failure(report) - else: - self.append_collect_skipped(report) - - def pytest_internalerror(self, excrepr): - self.errors += 1 - data = py.xml.escape(str(excrepr)) - self.test_logs.append( - '\n' - ' ' - '%s' % data) - - def pytest_sessionstart(self, session): - 
self.suite_start_time = time.time() - - def pytest_sessionfinish(self, session, exitstatus, __multicall__): - logfile = open(self.logfile, 'w', 1) # line buffered - suite_stop_time = time.time() - suite_time_delta = suite_stop_time - self.suite_start_time - numtests = self.passed + self.failed - logfile.write('') - logfile.writelines(self.test_logs) - logfile.write('') - logfile.close() - tw = session.config.pluginmanager.getplugin("terminalreporter")._tw - tw.line() - tw.sep("-", "generated xml file: %s" %(self.logfile)) --- a/testing/plugin/test_pytest_logxml.py +++ /dev/null @@ -1,149 +0,0 @@ - -from xml.dom import minidom - -def runandparse(testdir, *args): - resultpath = testdir.tmpdir.join("junit.xml") - result = testdir.runpytest("--xml=%s" % resultpath, *args) - xmldoc = minidom.parse(str(resultpath)) - return result, xmldoc - -def assert_attr(node, **kwargs): - for name, expected in kwargs.items(): - anode = node.getAttributeNode(name) - assert anode, "node %r has no attribute %r" %(node, name) - val = anode.value - assert val == str(expected) - -class TestPython: - def test_summing_simple(self, testdir): - testdir.makepyfile(""" - import py - def test_pass(): - pass - def test_fail(): - assert 0 - def test_skip(): - py.test.skip("") - """) - result, dom = runandparse(testdir) - assert result.ret - node = dom.getElementsByTagName("testsuite")[0] - assert_attr(node, errors=0, failures=1, skips=1, tests=2) - - def test_setup_error(self, testdir): - testdir.makepyfile(""" - def pytest_funcarg__arg(request): - raise ValueError() - def test_function(arg): - pass - """) - result, dom = runandparse(testdir) - assert result.ret - node = dom.getElementsByTagName("testsuite")[0] - assert_attr(node, errors=1, tests=0) - tnode = node.getElementsByTagName("testcase")[0] - assert_attr(tnode, - classname="test_setup_error.test_setup_error", - name="test_function") - fnode = tnode.getElementsByTagName("error")[0] - assert_attr(fnode, message="test setup failure") - assert "ValueError" in fnode.toxml() - - def test_classname_instance(self, testdir): - testdir.makepyfile(""" - class TestClass: - def test_method(self): - assert 0 - """) - result, dom = runandparse(testdir) - assert result.ret - node = dom.getElementsByTagName("testsuite")[0] - assert_attr(node, failures=1) - tnode = node.getElementsByTagName("testcase")[0] - assert_attr(tnode, - classname="test_classname_instance.test_classname_instance.TestClass", - name="test_method") - - def test_internal_error(self, testdir): - testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0") - testdir.makepyfile("def test_function(): pass") - result, dom = runandparse(testdir) - assert result.ret - node = dom.getElementsByTagName("testsuite")[0] - assert_attr(node, errors=1, tests=0) - tnode = node.getElementsByTagName("testcase")[0] - assert_attr(tnode, classname="pytest", name="internal") - fnode = tnode.getElementsByTagName("error")[0] - assert_attr(fnode, message="internal error") - assert "Division" in fnode.toxml() - - def test_failure_function(self, testdir): - testdir.makepyfile("def test_fail(): raise ValueError(42)") - result, dom = runandparse(testdir) - assert result.ret - node = dom.getElementsByTagName("testsuite")[0] - assert_attr(node, failures=1, tests=1) - tnode = node.getElementsByTagName("testcase")[0] - assert_attr(tnode, - classname="test_failure_function.test_failure_function", - name="test_fail") - fnode = tnode.getElementsByTagName("failure")[0] - assert_attr(fnode, message="test failure") - assert "ValueError" in 
fnode.toxml() - - def test_collect_error(self, testdir): - testdir.makepyfile("syntax error") - result, dom = runandparse(testdir) - assert result.ret - node = dom.getElementsByTagName("testsuite")[0] - assert_attr(node, errors=1, tests=0) - tnode = node.getElementsByTagName("testcase")[0] - assert_attr(tnode, - #classname="test_collect_error", - name="test_collect_error") - fnode = tnode.getElementsByTagName("failure")[0] - assert_attr(fnode, message="collection failure") - assert "invalid syntax" in fnode.toxml() - - def test_collect_skipped(self, testdir): - testdir.makepyfile("import py ; py.test.skip('xyz')") - result, dom = runandparse(testdir) - assert not result.ret - node = dom.getElementsByTagName("testsuite")[0] - assert_attr(node, skips=1, tests=0) - tnode = node.getElementsByTagName("testcase")[0] - assert_attr(tnode, - #classname="test_collect_error", - name="test_collect_skipped") - fnode = tnode.getElementsByTagName("skipped")[0] - assert_attr(fnode, message="collection skipped") - -class TestNonPython: - def test_summing_simple(self, testdir): - testdir.makeconftest(""" - import py - def pytest_collect_file(path, parent): - if path.ext == ".xyz": - return MyItem(path, parent) - class MyItem(py.test.collect.Item): - def __init__(self, path, parent): - super(MyItem, self).__init__(path.basename, parent) - self.fspath = path - def runtest(self): - raise ValueError(42) - def repr_failure(self, excinfo): - return "custom item runtest failed" - """) - testdir.tmpdir.join("myfile.xyz").write("hello") - result, dom = runandparse(testdir) - assert result.ret - node = dom.getElementsByTagName("testsuite")[0] - assert_attr(node, errors=0, failures=1, skips=0, tests=1) - tnode = node.getElementsByTagName("testcase")[0] - assert_attr(tnode, - #classname="test_collect_error", - name="myfile.xyz") - fnode = tnode.getElementsByTagName("failure")[0] - assert_attr(fnode, message="test failure") - assert "custom item runtest failed" in fnode.toxml() - --- a/py/_test/pluginmanager.py +++ b/py/_test/pluginmanager.py @@ -9,7 +9,7 @@ from py._test.outcome import Skipped default_plugins = ( "default runner capture terminal mark skipping tmpdir monkeypatch " "recwarn pdb pastebin unittest helpconfig nose assertion genscript " - "logxml doctest").split() + "junitxml doctest").split() def check_old_use(mod, modname): clsname = modname[len('pytest_'):].capitalize() + "Plugin" --- a/CHANGELOG +++ b/CHANGELOG @@ -4,8 +4,8 @@ Changes between 1.X and 1.1.1 - moved dist/looponfailing from py.test core into a new separately released pytest-xdist plugin. -- new junitxml plugin: --xml=path will generate a junit style xml file - which is parseable e.g. by the hudson continous integration server. +- new junitxml plugin: --junitxml=path will generate a junit style xml file + which is processable e.g. by the Hudson CI system. - new option: --genscript=path will generate a standalone py.test script which will not need any libraries installed. thanks to Ralf Schmitt. 
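
Rough command-line usage for the two CHANGELOG entries just above (the paths
are placeholders):

    py.test --junitxml=junit.xml    # write a JUnit-style XML report, e.g. for Hudson
    py.test --genscript=mypytest    # emit a standalone py.test script
    python mypytest                 # the generated script runs without an installed py lib
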
--- /dev/null +++ b/testing/plugin/test_pytest_junitxml.py @@ -0,0 +1,149 @@ + +from xml.dom import minidom + +def runandparse(testdir, *args): + resultpath = testdir.tmpdir.join("junit.xml") + result = testdir.runpytest("--junitxml=%s" % resultpath, *args) + xmldoc = minidom.parse(str(resultpath)) + return result, xmldoc + +def assert_attr(node, **kwargs): + for name, expected in kwargs.items(): + anode = node.getAttributeNode(name) + assert anode, "node %r has no attribute %r" %(node, name) + val = anode.value + assert val == str(expected) + +class TestPython: + def test_summing_simple(self, testdir): + testdir.makepyfile(""" + import py + def test_pass(): + pass + def test_fail(): + assert 0 + def test_skip(): + py.test.skip("") + """) + result, dom = runandparse(testdir) + assert result.ret + node = dom.getElementsByTagName("testsuite")[0] + assert_attr(node, errors=0, failures=1, skips=1, tests=2) + + def test_setup_error(self, testdir): + testdir.makepyfile(""" + def pytest_funcarg__arg(request): + raise ValueError() + def test_function(arg): + pass + """) + result, dom = runandparse(testdir) + assert result.ret + node = dom.getElementsByTagName("testsuite")[0] + assert_attr(node, errors=1, tests=0) + tnode = node.getElementsByTagName("testcase")[0] + assert_attr(tnode, + classname="test_setup_error.test_setup_error", + name="test_function") + fnode = tnode.getElementsByTagName("error")[0] + assert_attr(fnode, message="test setup failure") + assert "ValueError" in fnode.toxml() + + def test_classname_instance(self, testdir): + testdir.makepyfile(""" + class TestClass: + def test_method(self): + assert 0 + """) + result, dom = runandparse(testdir) + assert result.ret + node = dom.getElementsByTagName("testsuite")[0] + assert_attr(node, failures=1) + tnode = node.getElementsByTagName("testcase")[0] + assert_attr(tnode, + classname="test_classname_instance.test_classname_instance.TestClass", + name="test_method") + + def test_internal_error(self, testdir): + testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0") + testdir.makepyfile("def test_function(): pass") + result, dom = runandparse(testdir) + assert result.ret + node = dom.getElementsByTagName("testsuite")[0] + assert_attr(node, errors=1, tests=0) + tnode = node.getElementsByTagName("testcase")[0] + assert_attr(tnode, classname="pytest", name="internal") + fnode = tnode.getElementsByTagName("error")[0] + assert_attr(fnode, message="internal error") + assert "Division" in fnode.toxml() + + def test_failure_function(self, testdir): + testdir.makepyfile("def test_fail(): raise ValueError(42)") + result, dom = runandparse(testdir) + assert result.ret + node = dom.getElementsByTagName("testsuite")[0] + assert_attr(node, failures=1, tests=1) + tnode = node.getElementsByTagName("testcase")[0] + assert_attr(tnode, + classname="test_failure_function.test_failure_function", + name="test_fail") + fnode = tnode.getElementsByTagName("failure")[0] + assert_attr(fnode, message="test failure") + assert "ValueError" in fnode.toxml() + + def test_collect_error(self, testdir): + testdir.makepyfile("syntax error") + result, dom = runandparse(testdir) + assert result.ret + node = dom.getElementsByTagName("testsuite")[0] + assert_attr(node, errors=1, tests=0) + tnode = node.getElementsByTagName("testcase")[0] + assert_attr(tnode, + #classname="test_collect_error", + name="test_collect_error") + fnode = tnode.getElementsByTagName("failure")[0] + assert_attr(fnode, message="collection failure") + assert "invalid syntax" in fnode.toxml() + + def 
test_collect_skipped(self, testdir): + testdir.makepyfile("import py ; py.test.skip('xyz')") + result, dom = runandparse(testdir) + assert not result.ret + node = dom.getElementsByTagName("testsuite")[0] + assert_attr(node, skips=1, tests=0) + tnode = node.getElementsByTagName("testcase")[0] + assert_attr(tnode, + #classname="test_collect_error", + name="test_collect_skipped") + fnode = tnode.getElementsByTagName("skipped")[0] + assert_attr(fnode, message="collection skipped") + +class TestNonPython: + def test_summing_simple(self, testdir): + testdir.makeconftest(""" + import py + def pytest_collect_file(path, parent): + if path.ext == ".xyz": + return MyItem(path, parent) + class MyItem(py.test.collect.Item): + def __init__(self, path, parent): + super(MyItem, self).__init__(path.basename, parent) + self.fspath = path + def runtest(self): + raise ValueError(42) + def repr_failure(self, excinfo): + return "custom item runtest failed" + """) + testdir.tmpdir.join("myfile.xyz").write("hello") + result, dom = runandparse(testdir) + assert result.ret + node = dom.getElementsByTagName("testsuite")[0] + assert_attr(node, errors=0, failures=1, skips=0, tests=1) + tnode = node.getElementsByTagName("testcase")[0] + assert_attr(tnode, + #classname="test_collect_error", + name="myfile.xyz") + fnode = tnode.getElementsByTagName("failure")[0] + assert_attr(fnode, message="test failure") + assert "custom item runtest failed" in fnode.toxml() + --- /dev/null +++ b/py/_plugin/pytest_junitxml.py @@ -0,0 +1,156 @@ +""" + logging of test results in JUnit-XML format, for use with Hudson + and build integration servers. Based on initial code from Ross Lawley. +""" + +import py +import time + +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting") + group.addoption('--junitxml', action="store", dest="xmlpath", + metavar="path", default=None, + help="create junit-xml style report file at given path.") + +def pytest_configure(config): + xmlpath = config.option.xmlpath + if xmlpath: + config._xml = LogXML(xmlpath) + config.pluginmanager.register(config._xml) + +def pytest_unconfigure(config): + xml = getattr(config, '_xml', None) + if xml: + del config._xml + config.pluginmanager.unregister(xml) + +class LogXML(object): + def __init__(self, logfile): + self.logfile = logfile + self.test_logs = [] + self.passed = self.skipped = 0 + self.failed = self.errors = 0 + self._durations = {} + + def _opentestcase(self, report): + node = report.item + d = {'time': self._durations.pop(report.item, "0")} + names = [x.replace(".py", "") for x in node.listnames() if x != "()"] + d['classname'] = ".".join(names[:-1]) + d['name'] = names[-1] + attrs = ['%s="%s"' % item for item in sorted(d.items())] + self.test_logs.append("\n" % " ".join(attrs)) + + def _closetestcase(self): + self.test_logs.append("") + + def append_pass(self, report): + self.passed += 1 + self._opentestcase(report) + self._closetestcase() + + def append_failure(self, report): + self._opentestcase(report) + s = py.xml.escape(str(report.longrepr)) + #msg = str(report.longrepr.reprtraceback.extraline) + self.test_logs.append( + '%s' % (s)) + self._closetestcase() + self.failed += 1 + + def _opentestcase_collectfailure(self, report): + node = report.collector + d = {'time': '???'} + names = [x.replace(".py", "") for x in node.listnames() if x != "()"] + d['classname'] = ".".join(names[:-1]) + d['name'] = names[-1] + attrs = ['%s="%s"' % item for item in sorted(d.items())] + self.test_logs.append("\n" % " ".join(attrs)) + + def 
append_collect_failure(self, report): + self._opentestcase_collectfailure(report) + s = py.xml.escape(str(report.longrepr)) + #msg = str(report.longrepr.reprtraceback.extraline) + self.test_logs.append( + '%s' % (s)) + self._closetestcase() + self.errors += 1 + + def append_collect_skipped(self, report): + self._opentestcase_collectfailure(report) + s = py.xml.escape(str(report.longrepr)) + #msg = str(report.longrepr.reprtraceback.extraline) + self.test_logs.append( + '%s' % (s)) + self._closetestcase() + self.skipped += 1 + + def append_error(self, report): + self._opentestcase(report) + s = py.xml.escape(str(report.longrepr)) + self.test_logs.append( + '%s' % s) + self._closetestcase() + self.errors += 1 + + def append_skipped(self, report): + self._opentestcase(report) + self.test_logs.append("") + self._closetestcase() + self.skipped += 1 + + def pytest_runtest_logreport(self, report): + if report.passed: + self.append_pass(report) + elif report.failed: + if report.when != "call": + self.append_error(report) + else: + self.append_failure(report) + elif report.skipped: + self.append_skipped(report) + + def pytest_runtest_call(self, item, __multicall__): + start = time.time() + try: + return __multicall__.execute() + finally: + self._durations[item] = time.time() - start + + def pytest_collectreport(self, report): + if not report.passed: + if report.failed: + self.append_collect_failure(report) + else: + self.append_collect_skipped(report) + + def pytest_internalerror(self, excrepr): + self.errors += 1 + data = py.xml.escape(str(excrepr)) + self.test_logs.append( + '\n' + ' ' + '%s' % data) + + def pytest_sessionstart(self, session): + self.suite_start_time = time.time() + + def pytest_sessionfinish(self, session, exitstatus, __multicall__): + logfile = open(self.logfile, 'w', 1) # line buffered + suite_stop_time = time.time() + suite_time_delta = suite_stop_time - self.suite_start_time + numtests = self.passed + self.failed + logfile.write('') + logfile.writelines(self.test_logs) + logfile.write('') + logfile.close() + tw = session.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + tw.sep("-", "generated xml file: %s" %(self.logfile)) --- a/doc/test/plugin/logxml.txt +++ /dev/null @@ -1,28 +0,0 @@ - -logging of test results in JUnit-XML format, for use with Hudson -================================================================ - - -.. contents:: - :local: - -and build integration servers. Based on initial code from Ross Lawley. - -command line options --------------------- - - -``--xml=path`` - create junit-xml style report file at the given path. - -Start improving this plugin in 30 seconds -========================================= - - -1. Download `pytest_logxml.py`_ plugin source code -2. put it somewhere as ``pytest_logxml.py`` into your import path -3. a subsequent ``py.test`` run will use your local version - -Checkout customize_, other plugins_ or `get in contact`_. - -.. include:: links.txt --- /dev/null +++ b/doc/test/plugin/junitxml.txt @@ -0,0 +1,28 @@ + +logging of test results in JUnit-XML format, for use with Hudson +================================================================ + + +.. contents:: + :local: + +and build integration servers. Based on initial code from Ross Lawley. + +command line options +-------------------- + + +``--junitxml=path`` + create junit-xml style report file at given path. + +Start improving this plugin in 30 seconds +========================================= + + +1. Download `pytest_junitxml.py`_ plugin source code +2. 
put it somewhere as ``pytest_junitxml.py`` into your import path +3. a subsequent ``py.test`` run will use your local version + +Checkout customize_, other plugins_ or `get in contact`_. + +.. include:: links.txt --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -1,4 +1,4 @@ -.. _`pytest_logxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_logxml.py +.. _`helpconfig`: helpconfig.html .. _`terminal`: terminal.html .. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_recwarn.py .. _`unittest`: unittest.html @@ -20,8 +20,7 @@ .. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_tmpdir.py .. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_figleaf.py .. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_hooklog.py -.. _`logxml`: logxml.html -.. _`helpconfig`: helpconfig.html +.. _`junitxml`: junitxml.html .. _`plugin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/plugin.py .. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_skipping.py .. _`checkout the py.test development version`: ../../install.html#checkout @@ -40,6 +39,7 @@ .. _`monkeypatch`: monkeypatch.html .. _`coverage`: coverage.html .. _`resultlog`: resultlog.html +.. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_junitxml.py .. _`django`: django.html .. _`xmlresult`: xmlresult.html .. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_unittest.py --- a/doc/test/plugin/index.txt +++ b/doc/test/plugin/index.txt @@ -38,7 +38,7 @@ reporting and failure logging pastebin_ submit failure or test session information to a pastebin service. -logxml_ logging of test results in JUnit-XML format, for use with Hudson +junitxml_ logging of test results in JUnit-XML format, for use with Hudson xmlresult_ (external) for generating xml reports and CruiseControl integration --- a/doc/test/plugin/xdist.txt +++ b/doc/test/plugin/xdist.txt @@ -9,8 +9,8 @@ loop on failing tests, distribute test r The `pytest-xdist`_ plugin extends py.test with some unique test execution modes: -* Looponfail: run your tests in a subprocess. After it finishes py.test - waits until a file in your project changes and then re-runs only the +* Looponfail: run your tests repeatedly in a subprocess. After each run py.test + waits until a file in your project changes and then re-runs the previously failing tests. This is repeated until all tests pass after which again a full run is performed. 
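The angle-bracketed XML literals inside the ``LogXML`` methods of the junitxml patch above do not survive this archive's rendering, which is why calls such as ``self.test_logs.append("")`` and ``logfile.write('')`` look empty. A rough, self-contained sketch of the kind of JUnit-XML records the plugin assembles follows; the element and attribute names are assumptions taken from the test assertions above (``testsuite``, ``testcase``, ``failure message="test failure"``), not the literal plugin source::

    from xml.sax.saxutils import escape

    test_logs = []

    def open_testcase(classname, name, duration):
        # roughly what LogXML._opentestcase() does: one <testcase ...> per test
        d = {"classname": classname, "name": name, "time": duration}
        attrs = ['%s="%s"' % item for item in sorted(d.items())]
        test_logs.append("\n<testcase %s>" % " ".join(attrs))

    def close_testcase():
        test_logs.append("</testcase>")

    # a failing test contributes an escaped <failure> child element
    open_testcase("test_module", "test_fails", "0.001")
    test_logs.append('<failure message="test failure">%s</failure>'
                     % escape("def test_fails():\n>       assert 0"))
    close_testcase()

    # at session finish everything is wrapped into a single <testsuite> element
    xml = ('<testsuite errors="0" failures="1" skips="0" tests="1" time="0.001">'
           + "".join(test_logs) + "</testsuite>")
    print (xml)

The counters kept on ``LogXML`` (passed/failed/skipped/errors) supply exactly these ``testsuite`` attributes when the session finishes.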
--- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -9,7 +9,7 @@ plugins = [ ('other testing domains, misc', 'oejskit django xdist genscript'), ('reporting and failure logging', - 'pastebin logxml xmlresult resultlog terminal',), + 'pastebin junitxml xmlresult resultlog terminal',), ('other testing conventions', 'unittest nose doctest restdoc'), ('core debugging / help functionality', From commits-noreply at bitbucket.org Sun Jan 17 10:55:01 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Sun, 17 Jan 2010 09:55:01 +0000 (UTC) Subject: [py-svn] py-trunk commit 201d526c373c: fix deprecation warnings Message-ID: <20100117095501.7BE3B7EF01@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263722076 -3600 # Node ID 201d526c373c043b31fe8a03a35354381c323ed8 # Parent e693d93b84def64b7c8806fe6fcb53389a5878de fix deprecation warnings --- a/doc/example/funcarg/test_multi_python.py +++ b/doc/example/funcarg/test_multi_python.py @@ -11,7 +11,7 @@ pythonlist = ['python2.3', 'python2.4', def pytest_generate_tests(metafunc): if 'python1' in metafunc.funcargnames: assert 'python2' in metafunc.funcargnames - for obj in metafunc.function.multiarg.obj: + for obj in metafunc.function.multiarg.kwargs['obj']: for py1 in pythonlist: for py2 in pythonlist: metafunc.addcall(id="%s-%s-%s" % (py1, py2, obj), @@ -61,5 +61,5 @@ class Python: if not res: raise SystemExit(1) """ % (str(self.picklefile), expression))) - print loadfile + print (loadfile) py.process.cmdexec("%s %s" %(self.pythonpath, loadfile)) --- a/doc/example/assertion/test_failures.py +++ b/doc/example/assertion/test_failures.py @@ -1,6 +1,6 @@ import py -failure_demo = py.magic.autopath().dirpath('failure_demo.py') +failure_demo = py.path.local(__file__).dirpath('failure_demo.py') pytest_plugins = "pytest_pytester" From commits-noreply at bitbucket.org Mon Jan 18 02:08:15 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 01:08:15 +0000 (UTC) Subject: [py-svn] py-trunk commit 772fad27192b: refine tests and refine code to deal with new xdist semantics. Message-ID: <20100118010815.75EC97EF2D@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263766982 -3600 # Node ID 772fad27192b7aa96581e61766c84e96929afffd # Parent 201d526c373c043b31fe8a03a35354381c323ed8 refine tests and refine code to deal with new xdist semantics. 
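The "fix deprecation warnings" patch above trades ``py.magic.autopath()`` for anchoring on ``__file__`` and uses the parenthesized form of ``print`` so the code also parses on Python 3. A minimal sketch of the replacement idioms, assuming the ``py`` lib is importable (the file name is just an example)::

    import py

    # deprecated spelling:
    #     failure_demo = py.magic.autopath().dirpath('failure_demo.py')
    # non-deprecated equivalent used in the patch:
    this_dir = py.path.local(__file__).dirpath()
    failure_demo = this_dir.join('failure_demo.py')
    print (failure_demo)   # function-style print works on Python 2 and Python 3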
--- a/py/_plugin/pytest_restdoc.py +++ b/py/_plugin/pytest_restdoc.py @@ -30,16 +30,13 @@ def getproject(path): return Project(parent) class ReSTFile(py.test.collect.File): - def __init__(self, fspath, parent, project=None): + def __init__(self, fspath, parent, project): super(ReSTFile, self).__init__(fspath=fspath, parent=parent) - if project is None: - project = getproject(fspath) - assert project is not None self.project = project def collect(self): return [ - ReSTSyntaxTest(self.project, "ReSTSyntax", parent=self), + ReSTSyntaxTest("ReSTSyntax", parent=self, project=self.project), LinkCheckerMaker("checklinks", parent=self), DoctestText("doctest", parent=self), ] @@ -63,8 +60,8 @@ def deindent(s, sep='\n'): return sep.join(lines) class ReSTSyntaxTest(py.test.collect.Item): - def __init__(self, project, *args, **kwargs): - super(ReSTSyntaxTest, self).__init__(*args, **kwargs) + def __init__(self, name, parent, project): + super(ReSTSyntaxTest, self).__init__(name=name, parent=parent) self.project = project def reportinfo(self): --- a/testing/test_session.py +++ b/testing/test_session.py @@ -193,8 +193,3 @@ class TestNewSession(SessionTests): colfail = [x for x in finished if x.failed] assert len(colfail) == 1 -class TestNewSessionDSession(SessionTests): - def parseconfig(self, *args): - args = ('-n1',) + args - return SessionTests.parseconfig(self, *args) - --- a/testing/plugin/test_pytest_terminal.py +++ b/testing/plugin/test_pytest_terminal.py @@ -33,21 +33,16 @@ class Option: def pytest_generate_tests(metafunc): if "option" in metafunc.funcargnames: - metafunc.addcall( - id="default", - funcargs={'option': Option(verbose=False)} - ) - metafunc.addcall( - id="verbose", - funcargs={'option': Option(verbose=True)} - ) - if metafunc.config.pluginmanager.hasplugin("xdist"): - nodist = getattr(metafunc.function, 'nodist', False) - if not nodist: - metafunc.addcall( - id="verbose-dist", - funcargs={'option': Option(dist='each', verbose=True)} - ) + metafunc.addcall(id="default", param=Option(verbose=False)) + metafunc.addcall(id="verbose", param=Option(verbose=True)) + if not getattr(metafunc.function, 'nodist', False): + metafunc.addcall(id="verbose-dist", + param=Option(dist='each', verbose=True)) + +def pytest_funcarg__option(request): + if request.param.dist: + request.config.pluginmanager.skipifmissing("xdist") + return request.param class TestTerminal: def test_pass_skip_fail(self, testdir, option): @@ -255,12 +250,19 @@ class TestTerminal: ]) def test_keyboard_interrupt_dist(self, testdir, option): + # xxx could be refined to check for return code p = testdir.makepyfile(""" - raise KeyboardInterrupt + def test_sleep(): + import time + time.sleep(10) """) - result = testdir.runpytest(*option._getcmdargs()) - assert result.ret == 2 - result.stdout.fnmatch_lines(['*KEYBOARD INTERRUPT*']) + child = testdir.spawn_pytest(" ".join(option._getcmdargs())) + child.expect(".*test session starts.*") + child.kill(2) # keyboard interrupt + child.expect(".*KeyboardInterrupt.*") + #child.expect(".*seconds.*") + child.close() + #assert ret == 2 @py.test.mark.nodist def test_keyboard_interrupt(self, testdir, option): @@ -593,9 +595,10 @@ def test_terminalreporter_reportopt_conf assert result.stdout.fnmatch_lines([ "*1 passed*" ]) - def test_trace_reporting(self, testdir): - result = testdir.runpytest("--trace") - assert result.stdout.fnmatch_lines([ - "*active plugins*" - ]) - assert result.ret == 0 + +def test_trace_reporting(testdir): + result = testdir.runpytest("--traceconfig") + assert 
result.stdout.fnmatch_lines([ + "*active plugins*" + ]) + assert result.ret == 0 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -246,3 +246,132 @@ def test_preparse_ordering(testdir, monk plugin = config.pluginmanager.getplugin("mytestplugin") assert plugin.x == 42 + +import pickle +class TestConfigPickling: + def pytest_funcarg__testdir(self, request): + oldconfig = py.test.config + print("setting py.test.config to None") + py.test.config = None + def resetglobals(): + py.builtin.print_("setting py.test.config to", oldconfig) + py.test.config = oldconfig + request.addfinalizer(resetglobals) + return request.getfuncargvalue("testdir") + + def test_config_getstate_setstate(self, testdir): + from py._test.config import Config + testdir.makepyfile(__init__="", conftest="x=1; y=2") + hello = testdir.makepyfile(hello="") + tmp = testdir.tmpdir + testdir.chdir() + config1 = testdir.parseconfig(hello) + config2 = Config() + config2.__setstate__(config1.__getstate__()) + assert config2.topdir == py.path.local() + config2_relpaths = [py.path.local(x).relto(config2.topdir) + for x in config2.args] + config1_relpaths = [py.path.local(x).relto(config1.topdir) + for x in config1.args] + + assert config2_relpaths == config1_relpaths + for name, value in config1.option.__dict__.items(): + assert getattr(config2.option, name) == value + assert config2.getvalue("x") == 1 + + def test_config_pickling_customoption(self, testdir): + testdir.makeconftest(""" + def pytest_addoption(parser): + group = parser.getgroup("testing group") + group.addoption('-G', '--glong', action="store", default=42, + type="int", dest="gdest", help="g value.") + """) + config = testdir.parseconfig("-G", "11") + assert config.option.gdest == 11 + repr = config.__getstate__() + + config = testdir.Config() + py.test.raises(AttributeError, "config.option.gdest") + + config2 = testdir.Config() + config2.__setstate__(repr) + assert config2.option.gdest == 11 + + def test_config_pickling_and_conftest_deprecated(self, testdir): + tmp = testdir.tmpdir.ensure("w1", "w2", dir=1) + tmp.ensure("__init__.py") + tmp.join("conftest.py").write(py.code.Source(""" + def pytest_addoption(parser): + group = parser.getgroup("testing group") + group.addoption('-G', '--glong', action="store", default=42, + type="int", dest="gdest", help="g value.") + """)) + config = testdir.parseconfig(tmp, "-G", "11") + assert config.option.gdest == 11 + repr = config.__getstate__() + + config = testdir.Config() + py.test.raises(AttributeError, "config.option.gdest") + + config2 = testdir.Config() + config2.__setstate__(repr) + assert config2.option.gdest == 11 + + option = config2.addoptions("testing group", + config2.Option('-G', '--glong', action="store", default=42, + type="int", dest="gdest", help="g value.")) + assert option.gdest == 11 + + def test_config_picklability(self, testdir): + config = testdir.parseconfig() + s = pickle.dumps(config) + newconfig = pickle.loads(s) + assert hasattr(newconfig, "topdir") + assert newconfig.topdir == py.path.local() + + def test_collector_implicit_config_pickling(self, testdir): + tmpdir = testdir.tmpdir + testdir.chdir() + testdir.makepyfile(hello="def test_x(): pass") + config = testdir.parseconfig(tmpdir) + col = config.getnode(config.topdir) + io = py.io.BytesIO() + pickler = pickle.Pickler(io) + pickler.dump(col) + io.seek(0) + unpickler = pickle.Unpickler(io) + col2 = unpickler.load() + assert col2.name == col.name + assert col2.listnames() == col.listnames() + + def test_config_and_collector_pickling(self, 
testdir): + tmpdir = testdir.tmpdir + dir1 = tmpdir.ensure("sourcedir", "somedir", dir=1) + config = testdir.parseconfig() + assert config.topdir == tmpdir + col = config.getnode(dir1.dirpath()) + col1 = config.getnode(dir1) + assert col1.parent == col + io = py.io.BytesIO() + pickler = pickle.Pickler(io) + pickler.dump(col) + pickler.dump(col1) + pickler.dump(col) + io.seek(0) + unpickler = pickle.Unpickler(io) + newtopdir = tmpdir.ensure("newtopdir", dir=1) + newtopdir.mkdir("sourcedir").mkdir("somedir") + old = newtopdir.chdir() + try: + newcol = unpickler.load() + newcol2 = unpickler.load() + newcol3 = unpickler.load() + assert newcol2.config is newcol.config + assert newcol2.parent == newcol + assert newcol2.config.topdir.realpath() == newtopdir.realpath() + newsourcedir = newtopdir.join("sourcedir") + assert newcol.fspath.realpath() == newsourcedir.realpath() + assert newcol2.fspath.basename == dir1.basename + assert newcol2.fspath.relto(newcol2.config.topdir) + finally: + old.chdir() --- a/py/_test/config.py +++ b/py/_test/config.py @@ -15,9 +15,9 @@ def ensuretemp(string, dir=1): return py.test.config.ensuretemp(string, dir=dir) class CmdOptions(object): - """ pure container instance for holding cmdline options - as attributes. - """ + """ holds cmdline options as attributes.""" + def __init__(self, **kwargs): + self.__dict__.update(kwargs) def __repr__(self): return "" %(self.__dict__,) @@ -31,8 +31,8 @@ class Config(object): basetemp = None _sessionclass = None - def __init__(self, topdir=None): - self.option = CmdOptions() + def __init__(self, topdir=None, option=None): + self.option = option or CmdOptions() self.topdir = topdir self._parser = parseopt.Parser( usage="usage: %prog [options] [file_or_dir] [file_or_dir] [...]", @@ -47,9 +47,9 @@ class Config(object): self.pluginmanager.consider_conftest(conftestmodule) def _getmatchingplugins(self, fspath): - conftests = self._conftest._conftestpath2mod.values() + allconftests = self._conftest._conftestpath2mod.values() plugins = [x for x in self.pluginmanager.getplugins() - if x not in conftests] + if x not in allconftests] plugins += self._conftest.getconftestmodules(fspath) return plugins @@ -114,20 +114,20 @@ class Config(object): for path in self.args: path = py.path.local(path) l.append(path.relto(self.topdir)) - return l, vars(self.option) + return l, self.option.__dict__ def __setstate__(self, repr): # we have to set py.test.config because loading # of conftest files may use it (deprecated) # mainly by py.test.config.addoptions() - py.test.config = self + global config_per_process + py.test.config = config_per_process = self + args, cmdlineopts = repr + cmdlineopts = CmdOptions(**cmdlineopts) # next line will registers default plugins - self.__init__(topdir=py.path.local()) + self.__init__(topdir=py.path.local(), option=cmdlineopts) self._rootcol = RootCollector(config=self) - args, cmdlineopts = repr args = [str(self.topdir.join(x)) for x in args] - self.option = CmdOptions() - self.option.__dict__.update(cmdlineopts) self._preparse(args) self._setargs(args) @@ -177,7 +177,7 @@ class Config(object): def _getcollectclass(self, name, path): try: - cls = self.getvalue(name, path) + cls = self._conftest.rget(name, path) except KeyError: return getattr(py.test.collect, name) else: --- a/py/_plugin/pytest_pytester.py +++ b/py/_plugin/pytest_pytester.py @@ -219,7 +219,7 @@ class TmpTestdir: if not args: args = [self.tmpdir] from py._test import config - oldconfig = py.test.config + oldconfig = config.config_per_process # 
py.test.config try: c = config.config_per_process = py.test.config = pytestConfig() c.basetemp = oldconfig.mktemp("reparse", numbered=True) --- a/testing/plugin/test_pytest_unittest.py +++ b/testing/plugin/test_pytest_unittest.py @@ -51,7 +51,7 @@ def test_new_instances(testdir): reprec.assertoutcome(passed=2) def test_teardown(testdir): - testpath = testdir.makepyfile(test_three=""" + testpath = testdir.makepyfile(""" import unittest pytest_plugins = "pytest_unittest" # XXX class MyTestCase(unittest.TestCase): --- a/py/_test/collect.py +++ b/py/_test/collect.py @@ -1,6 +1,5 @@ """ -base test collection objects. Collectors and test Items form a tree -that is usually built iteratively. +test collection nodes, forming a tree, Items are leafs. """ import py @@ -33,9 +32,9 @@ class Node(object): self.fspath = getattr(parent, 'fspath', None) self.ihook = HookProxy(self) - def _checkcollectable(self): - if not hasattr(self, 'fspath'): - self.parent._memocollect() # to reraise exception + def _reraiseunpicklingproblem(self): + if hasattr(self, '_unpickle_exc'): + py.builtin._reraise(*self._unpickle_exc) # # note to myself: Pickling is uh. @@ -46,23 +45,25 @@ class Node(object): name, parent = nameparent try: colitems = parent._memocollect() - except KeyboardInterrupt: - raise - except Exception: - # seems our parent can't collect us - # so let's be somewhat operable - # _checkcollectable() is to tell outsiders about the fact - self.name = name - self.parent = parent - self.config = parent.config - #self._obj = "could not unpickle" - else: for colitem in colitems: if colitem.name == name: # we are a copy that will not be returned # by our parent self.__dict__ = colitem.__dict__ break + else: + raise ValueError("item %r not found in parent collection %r" %( + name, [x.name for x in colitems])) + except KeyboardInterrupt: + raise + except Exception: + # our parent can't collect us but we want unpickling to + # otherwise continue - self._reraiseunpicklingproblem() will + # reraise the problem + self._unpickle_exc = py.std.sys.exc_info() + self.name = name + self.parent = parent + self.config = parent.config def __repr__(self): if getattr(self.config.option, 'debug', False): @@ -268,15 +269,12 @@ class FSCollector(Collector): self.fspath = fspath def __getstate__(self): - if isinstance(self.parent, RootCollector): - relpath = self.parent._getrelpath(self.fspath) - return (relpath, self.parent) - else: - return (self.name, self.parent) - - def __setstate__(self, picklestate): - name, parent = picklestate - self.__init__(parent.fspath.join(name), parent=parent) + # RootCollector.getbynames() inserts a directory which we need + # to throw out here for proper re-instantiation + if isinstance(self.parent.parent, RootCollector): + assert self.parent.fspath == self.parent.parent.fspath, self.parent + return (self.name, self.parent.parent) # shortcut + return super(Collector, self).__getstate__() class File(FSCollector): """ base class for collecting tests from a file. """ @@ -382,6 +380,9 @@ class RootCollector(Directory): def __init__(self, config): Directory.__init__(self, config.topdir, parent=None, config=config) self.name = None + + def __repr__(self): + return "" %(self.fspath,) def getbynames(self, names): current = self.consider(self.config.topdir) --- a/py/_test/session.py +++ b/py/_test/session.py @@ -24,6 +24,8 @@ class Session(object): def genitems(self, colitems, keywordexpr=None): """ yield Items from iterating over the given colitems. 
""" + if colitems: + colitems = list(colitems) while colitems: next = colitems.pop(0) if isinstance(next, (tuple, list)): From commits-noreply at bitbucket.org Mon Jan 18 02:08:13 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 01:08:13 +0000 (UTC) Subject: [py-svn] py-trunk commit 83ddfaf24fff: fix python2.4 issue Message-ID: <20100118010813.71B447EF2C@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263776476 -3600 # Node ID 83ddfaf24fffa02be51c7a749ad2866b90a94a07 # Parent 7b9a4fa15994011c40abb523311b20dc0777075c fix python2.4 issue --- a/testing/test_outcome.py +++ b/testing/test_outcome.py @@ -68,4 +68,5 @@ def test_pytest_cmdline_main(testdir): py.test.cmdline.main([__file__]) """ % (str(py._pydir.dirpath()))) import subprocess - subprocess.check_call([sys.executable, str(p)]) + ret = subprocess.call([sys.executable, str(p)]) + assert ret == 0 From commits-noreply at bitbucket.org Mon Jan 18 02:08:13 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 01:08:13 +0000 (UTC) Subject: [py-svn] py-trunk commit 7b9a4fa15994: move rsync reporting out Message-ID: <20100118010813.5E2D17EF29@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263771682 -3600 # Node ID 7b9a4fa15994011c40abb523311b20dc0777075c # Parent 772fad27192b7aa96581e61766c84e96929afffd move rsync reporting out --- a/testing/plugin/test_pytest_terminal.py +++ b/testing/plugin/test_pytest_terminal.py @@ -100,40 +100,6 @@ class TestTerminal: "INTERNALERROR> *raise ValueError*" ]) - def test_gwmanage_events(self, testdir, linecomp): - execnet = py.test.importorskip("execnet") - modcol = testdir.getmodulecol(""" - def test_one(): - pass - """, configargs=("-v",)) - - rep = TerminalReporter(modcol.config, file=linecomp.stringio) - class gw1: - id = "X1" - spec = execnet.XSpec("popen") - class gw2: - id = "X2" - spec = execnet.XSpec("popen") - class rinfo: - version_info = (2, 5, 1, 'final', 0) - executable = "hello" - platform = "xyz" - cwd = "qwe" - - rep.pytest_gwmanage_newgateway(gw1, rinfo) - linecomp.assert_contains_lines([ - "*X1*popen*xyz*2.5*" - ]) - - rep.pytest_gwmanage_rsyncstart(source="hello", gateways=[gw1, gw2]) - linecomp.assert_contains_lines([ - "rsyncstart: hello -> [X1], [X2]" - ]) - rep.pytest_gwmanage_rsyncfinish(source="hello", gateways=[gw1, gw2]) - linecomp.assert_contains_lines([ - "rsyncfinish: hello -> [X1], [X2]" - ]) - def test_writeline(self, testdir, linecomp): modcol = testdir.getmodulecol("def test_one(): pass") stringio = py.io.TextIO() --- a/py/_plugin/pytest_terminal.py +++ b/py/_plugin/pytest_terminal.py @@ -143,17 +143,6 @@ class TerminalReporter: self.write_line(infoline) self.gateway2info[gateway] = infoline - def pytest_gwmanage_rsyncstart(self, source, gateways): - targets = ", ".join(["[%s]" % gw.id for gw in gateways]) - msg = "rsyncstart: %s -> %s" %(source, targets) - if not self.config.option.verbose: - msg += " # use --verbose to see rsync progress" - self.write_line(msg) - - def pytest_gwmanage_rsyncfinish(self, source, gateways): - targets = ", ".join(["[%s]" % gw.id for gw in gateways]) - self.write_line("rsyncfinish: %s -> %s" %(source, targets)) - def pytest_plugin_registered(self, plugin): if self.config.option.traceconfig: msg = "PLUGIN registered: %s" %(plugin,) From 
commits-noreply at bitbucket.org Mon Jan 18 03:04:38 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 02:04:38 +0000 (UTC) Subject: [py-svn] py-trunk commit 9a14e7af3e38: refine excludepath handling to treat entries with no path as matching Message-ID: <20100118020438.0A63F7EF21@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263780260 -3600 # Node ID 9a14e7af3e386f48c1e549cd3c975f95d881621a # Parent 83ddfaf24fffa02be51c7a749ad2866b90a94a07 refine excludepath handling to treat entries with no path as matching --- a/py/_code/code.py +++ b/py/_code/code.py @@ -293,8 +293,8 @@ class Traceback(list): code = x.frame.code codepath = code.path if ((path is None or codepath == path) and - (excludepath is None or (hasattr(codepath, 'relto') and - not codepath.relto(excludepath))) and + (excludepath is None or not hasattr(codepath, 'relto') or + not codepath.relto(excludepath)) and (lineno is None or x.lineno == lineno) and (firstlineno is None or x.frame.code.firstlineno == firstlineno)): return Traceback(x._rawentry) --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -114,8 +114,10 @@ class TestTraceback_f_g_h: excinfo = py.test.raises(ValueError, "p.pyimport().f()") basedir = py._pydir newtraceback = excinfo.traceback.cut(excludepath=basedir) - assert len(newtraceback) == 1 - assert newtraceback[0].frame.code.path == p + for x in newtraceback: + if hasattr(x, 'path'): + assert not py.path.local(x.path).relto(basedir) + assert newtraceback[-1].frame.code.path == p def test_traceback_filter(self): traceback = self.excinfo.traceback From commits-noreply at bitbucket.org Mon Jan 18 11:20:20 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 10:20:20 +0000 (UTC) Subject: [py-svn] py-trunk commit a4b28ccd6870: pushing towards 1.2.0 Message-ID: <20100118102020.7190F7EF16@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263809999 -3600 # Node ID a4b28ccd6870da8077bee9bc7b2c36bdb3f45db9 # Parent 9a14e7af3e386f48c1e549cd3c975f95d881621a pushing towards 1.2.0 --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -1,48 +1,48 @@ .. _`helpconfig`: helpconfig.html .. _`terminal`: terminal.html -.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_recwarn.py +.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_recwarn.py .. _`unittest`: unittest.html -.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_monkeypatch.py -.. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_genscript.py +.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_monkeypatch.py +.. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_genscript.py .. _`pastebin`: pastebin.html .. _`skipping`: skipping.html .. _`genscript`: genscript.html .. _`plugins`: index.html .. _`mark`: mark.html .. _`tmpdir`: tmpdir.html -.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_doctest.py +.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_doctest.py .. _`capture`: capture.html -.. 
_`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_nose.py -.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_restdoc.py +.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_nose.py +.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html .. _`xdist`: xdist.html -.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_pastebin.py -.. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_tmpdir.py -.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_figleaf.py -.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_hooklog.py +.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_pastebin.py +.. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_tmpdir.py +.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_figleaf.py +.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_hooklog.py .. _`junitxml`: junitxml.html -.. _`plugin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/plugin.py -.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_skipping.py +.. _`plugin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/plugin.py +.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_skipping.py .. _`checkout the py.test development version`: ../../install.html#checkout -.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_helpconfig.py +.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_helpconfig.py .. _`oejskit`: oejskit.html .. _`doctest`: doctest.html -.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_mark.py +.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_mark.py .. _`get in contact`: ../../contact.html -.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_capture.py +.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_capture.py .. _`figleaf`: figleaf.html .. _`customize`: ../customize.html .. _`hooklog`: hooklog.html -.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_terminal.py +.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_terminal.py .. _`recwarn`: recwarn.html -.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_pdb.py +.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_pdb.py .. _`monkeypatch`: monkeypatch.html .. _`coverage`: coverage.html .. _`resultlog`: resultlog.html -.. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_junitxml.py +.. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_junitxml.py .. _`django`: django.html .. _`xmlresult`: xmlresult.html -.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_unittest.py +.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_unittest.py .. _`nose`: nose.html -.. 
_`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0a1/py/plugin/pytest_resultlog.py +.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_resultlog.py .. _`pdb`: pdb.html --- a/setup.py +++ b/setup.py @@ -27,7 +27,7 @@ def main(): name='py', description='py.test and pylib: rapid testing and development utils.', long_description = long_description, - version= trunk or '1.2.0a1', + version= trunk or '1.2.0', url='http://pylib.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], --- a/py/__init__.py +++ b/py/__init__.py @@ -8,9 +8,9 @@ dictionary or an import path. (c) Holger Krekel and others, 2004-2010 """ -version = "1.2.0a1" +version = "1.2.0" -__version__ = version = version or "1.1.x" +__version__ = version = version or "1.2.x" import py.apipkg py.apipkg.initpkg(__name__, dict( From commits-noreply at bitbucket.org Mon Jan 18 12:12:30 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 11:12:30 +0000 (UTC) Subject: [py-svn] py-trunk commit 576f202bf907: always directly use basename for tracebacks, independently from code.path Message-ID: <20100118111230.C71E87EF0F@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263813138 -3600 # Node ID 576f202bf9075ec7c7d4ab7951ebd3bdf558dfe0 # Parent a4b28ccd6870da8077bee9bc7b2c36bdb3f45db9 always directly use basename for tracebacks, independently from code.path fixes issue77 although i guess it was already fixed before. --- a/py/_code/code.py +++ b/py/_code/code.py @@ -1,5 +1,5 @@ import py -import sys +import sys, os.path builtin_repr = repr @@ -70,13 +70,13 @@ class Code(object): return py.std.new.code(*arglist) def path(self): - """ return a py.path.local object pointing to the source code """ + """ return a path object pointing to source code""" fn = self.raw.co_filename try: return fn.__path__ except AttributeError: p = py.path.local(self.raw.co_filename) - if not p.check(file=1): + if not p.check(): # XXX maybe try harder like the weird logic # in the standard lib [linecache.updatecache] does? 
p = self.raw.co_filename @@ -537,9 +537,9 @@ class FormattedExcinfo(object): else: if self.style == "short": line = source[line_index].lstrip() - trybasename = getattr(entry.path, 'basename', entry.path) + basename = os.path.basename(entry.frame.code.filename) lines.append(' File "%s", line %d, in %s' % ( - trybasename, entry.lineno+1, entry.name)) + basename, entry.lineno+1, entry.name)) lines.append(" " + line) if excinfo: lines.extend(self.get_exconly(excinfo, indent=4)) --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -503,6 +503,32 @@ raise ValueError() reprtb = p.repr_traceback(excinfo) assert len(reprtb.reprentries) == 3 + def test_traceback_short_no_source(self, importasmod, monkeypatch): + mod = importasmod(""" + def func1(): + raise ValueError("hello") + def entry(): + func1() + """) + excinfo = py.test.raises(ValueError, mod.entry) + from py._code.code import Code + monkeypatch.setattr(Code, 'path', 'bogus') + excinfo.traceback[0].frame.code.path = "bogus" + p = FormattedExcinfo(style="short") + reprtb = p.repr_traceback_entry(excinfo.traceback[-2]) + lines = reprtb.lines + last_p = FormattedExcinfo(style="short") + last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo) + last_lines = last_reprtb.lines + monkeypatch.undo() + basename = py.path.local(mod.__file__).basename + assert lines[0] == ' File "%s", line 5, in entry' % basename + assert lines[1] == ' func1()' + + assert last_lines[0] == ' File "%s", line 3, in func1' % basename + assert last_lines[1] == ' raise ValueError("hello")' + assert last_lines[2] == 'E ValueError: hello' + def test_repr_traceback_and_excinfo(self, importasmod): mod = importasmod(""" def f(x): From commits-noreply at bitbucket.org Mon Jan 18 13:17:09 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 12:17:09 +0000 (UTC) Subject: [py-svn] py-trunk commit 302fcb17fba8: turn this into a black-box test Message-ID: <20100118121709.E3E7B7EF20@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263817018 -3600 # Node ID 302fcb17fba808d3545e19544128736e88d11863 # Parent 576f202bf9075ec7c7d4ab7951ebd3bdf558dfe0 turn this into a black-box test --- a/doc/example/assertion/test_failures.py +++ b/doc/example/assertion/test_failures.py @@ -5,10 +5,11 @@ failure_demo = py.path.local(__file__).d pytest_plugins = "pytest_pytester" def test_failure_demo_fails_properly(testdir): - reprec = testdir.inline_run(failure_demo) - passed, skipped, failed = reprec.countoutcomes() - assert passed == 0 - assert failed == 20, failed - colreports = reprec.getreports("pytest_collectreport") - failed = len([x.failed for x in colreports]) - assert failed == 3 + target = testdir.tmpdir.join(failure_demo.basename) + failure_demo.copy(target) + failure_demo.copy(testdir.tmpdir.join(failure_demo.basename)) + result = testdir.runpytest(target) + result.stdout.fnmatch_lines([ + "*20 failed*" + ]) + assert result.ret != 0 From commits-noreply at bitbucket.org Mon Jan 18 16:20:20 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 15:20:20 +0000 (UTC) Subject: [py-svn] py-trunk commit be113b5eb5f9: some finalizing docs bit, regen plugin docs Message-ID: <20100118152020.A49777EF28@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263827939 -3600 # 
Node ID be113b5eb5f9a58e07c1007849571f3edfa7ee8b # Parent 302fcb17fba808d3545e19544128736e88d11863 some finalizing docs bit, regen plugin docs --- /dev/null +++ b/doc/announce/release-1.2.0.txt @@ -0,0 +1,115 @@ +py.test/pylib 1.2.0: junitxml, standalone test scripts, pluginization +-------------------------------------------------------------------------------- + +py.test is an advanced automated testing tool working with +Python2, Python3 and Jython versions on all major operating +systems. It has a simple plugin architecture and can run many +existing common Python test suites without modification. It offers +some unique features not found in other testing tools. +See http://pytest.org for more info. + +py.test 1.2.0 brings many bug fixes and interesting new abilities: + +* --junitxml=path will create an XML file for use with CI processing +* --genscript=path creates a standalone py.test-equivalent test-script +* --ignore=path prevents collection of anything below that path +* --confcutdir=path only lookup conftest.py test configs below that path +* a new "pytestconfig" function argument gives direct access to option values +* parametrized tests can now be specified per-class as well +* on CPython py.test additionally installs as "py.test-VERSION" + +Apart from many bug fixes 1.2.0 also has better pluginization. +Distributed testing and looponfailing testing have been moved +out into its own "pytest-xdist" plugin which can be installed separately. +The same is true for "pytest-figleaf" for doing coverage reporting. +Those can also serve well now as blue prints for doing your own. +separately released plugins. + +thanks to all who helped and gave feedback, +have fun, + +holger krekel, January 2010 + +Changes between 1.2.0 and 1.1.1 +===================================== + +- moved dist/looponfailing from py.test core into a new + separately released pytest-xdist plugin. + +- new junitxml plugin: --junitxml=path will generate a junit style xml file + which is processable e.g. by the Hudson CI system. + +- new option: --genscript=path will generate a standalone py.test script + which will not need any libraries installed. thanks to Ralf Schmitt. + +- new option: --ignore will prevent specified path from collection. + Can be specified multiple times. + +- new option: --confcutdir=dir will make py.test only consider conftest + files that are relative to the specified dir. + +- new funcarg: "pytestconfig" is the pytest config object for access + to command line args and can now be easily used in a test. + +- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to + disambiguate between Python3, python2.X, Jython and PyPy installed versions. + +- new "pytestconfig" funcarg allows access to test config object + +- new "pytest_report_header" hook can return additional lines + to be displayed at the header of a test run. + +- (experimental) allow "py.test path::name1::name2::..." for pointing + to a test within a test collection directly. This might eventually + evolve as a full substitute to "-k" specifications. + +- streamlined plugin loading: order is now as documented in + customize.html: setuptools, ENV, commandline, conftest. 
+ also setuptools entry point names are turned to canonical names ("pytest_*")
+
+ - automatically skip tests that need 'capfd' but have no os.dup
+
+ - allow pytest_generate_tests to be defined in classes as well
+
+ - deprecate usage of 'disabled' attribute in favour of pytestmark
+ - deprecate definition of Directory, Module, Class and Function nodes
+ in conftest.py files. Use pytest collect hooks instead.
+
+ - collection/item node specific runtest/collect hooks are only called exactly
+ on matching conftest.py files, i.e. ones which are exactly below
+ the filesystem path of an item
+
+ - change: the first pytest_collect_directory hook to return something
+ will now prevent further hooks to be called.
+
+ - change: figleaf plugin now requires --figleaf to run. Also
+ change its long command line options to be a bit shorter (see py.test -h).
+
+ - change: pytest doctest plugin is now enabled by default and has a
+ new option --doctest-glob to set a pattern for file matches.
+
+ - change: remove internal py._* helper vars, only keep py._pydir
+
+ - robustify capturing to survive if custom pytest_runtest_setup
+ code failed and prevented the capturing setup code from running.
+
+ - make py.test.* helpers provided by default plugins visible early -
+ works transparently both for pydoc and for interactive sessions
+ which will regularly see e.g. py.test.mark and py.test.importorskip.
+
+ - simplify internal plugin manager machinery
+ - simplify internal collection tree by introducing a RootCollector node
+
+ - fix assert reinterpretation that sees a call containing "keyword=..."
+
+ - fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
+ hooks on slaves during dist-testing, report module/session teardown
+ hooks correctly.
+
+ - fix issue65: properly handle dist-testing if no
+ execnet/py lib installed remotely.
+
+ - skip some install-tests if no execnet is available
+
+ - fix docs, fix internal bin/ script generation
+
--- a/doc/test/index.txt
+++ b/doc/test/index.txt
@@ -24,6 +24,5 @@ changelog_: history of changes covering
 .. _features: features.html
 .. _funcargs: funcargs.html
 .. _customize: customize.html
-.. _`distributed testing`: dist.html
--- a/doc/test/dist.txt
+++ /dev/null
@@ -1,132 +0,0 @@
-.. _`distribute tests across machines`:
-
-===================
-Distributed testing
-===================
-
-``py.test`` can ad-hoc distribute test runs to multiple CPUs or remote
-machines. This allows to speed up development or to use special resources
-of remote machines. Before running tests remotely, ``py.test`` efficiently
-synchronizes your program source code to the remote place. All test results
-are reported back and displayed to your local test session. You may
-specify different Python versions and interpreters.
-
-**Requirements**: you need to install the `execnet`_ package
-(at least version 1.0.0b4) to perform distributed test runs.
-
-**NOTE**: Version 1.1.x is not able to distribute tests across Python3/Python2 barriers.
-
-Speed up test runs by sending tests to multiple CPUs
----------------------------------------------------------
-
-To send tests to multiple CPUs, type::
-
- py.test -n NUM
-
-Especially for longer running tests or tests requiring
-a lot of IO this can lead to considerable speed ups.
- - -Running tests in a Python subprocess ----------------------------------------- - -To instantiate a python2.4 sub process and send tests to it, you may type:: - - py.test -d --tx popen//python=python2.4 - -This will start a subprocess which is run with the "python2.4" -Python interpreter, found in your system binary lookup path. - -If you prefix the --tx option value like this:: - - --tx 3*popen//python=python2.4 - -then three subprocesses would be created and tests -will be load-balanced across these three processes. - - -Sending tests to remote SSH accounts ------------------------------------------------ - -Suppose you have a package ``mypkg`` which contains some -tests that you can successfully run locally. And you -have a ssh-reachable machine ``myhost``. Then -you can ad-hoc distribute your tests by typing:: - - py.test -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg - -This will synchronize your ``mypkg`` package directory -to an remote ssh account and then locally collect tests -and send them to remote places for execution. - -You can specify multiple ``--rsyncdir`` directories -to be sent to the remote side. - -**NOTE:** For py.test to collect and send tests correctly -you not only need to make sure all code and tests -directories are rsynced, but that any test (sub) directory -also has an ``__init__.py`` file because internally -py.test references tests as a fully qualified python -module path. **You will otherwise get strange errors** -during setup of the remote side. - -Sending tests to remote Socket Servers ----------------------------------------- - -Download the single-module `socketserver.py`_ Python program -and run it like this:: - - python socketserver.py - -It will tell you that it starts listening on the default -port. You can now on your home machine specify this -new socket host with something like this:: - - py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg - - -.. _`atonce`: - -Running tests on many platforms at once -------------------------------------------------------------- - -The basic command to run tests on multiple platforms is:: - - py.test --dist=each --tx=spec1 --tx=spec2 - -If you specify a windows host, an OSX host and a Linux -environment this command will send each tests to all -platforms - and report back failures from all platforms -at once. The specifications strings use the `xspec syntax`_. - -.. _`xspec syntax`: http://codespeak.net/execnet/trunk/basics.html#xspec - -.. _`socketserver.py`: http://codespeak.net/svn/py/dist/py/execnet/script/socketserver.py - -.. _`execnet`: http://codespeak.net/execnet - -Specifying test exec environments in a conftest.py -------------------------------------------------------------- - -Instead of specifying command line options, you can -put options values in a ``conftest.py`` file like this:: - - pytest_option_tx = ['ssh=myhost//python=python2.5', 'popen//python=python2.5'] - pytest_option_dist = True - -Any commandline ``--tx`` specifictions will add to the list of available execution -environments. - -Specifying "rsync" dirs in a conftest.py -------------------------------------------------------------- - -In your ``mypkg/conftest.py`` you may specify directories to synchronise -or to exclude:: - - rsyncdirs = ['.', '../plugins'] - rsyncignore = ['_cache'] - -These directory specifications are relative to the directory -where the ``conftest.py`` is found. - - --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -1,5 +1,4 @@ .. _`helpconfig`: helpconfig.html -.. 
_`terminal`: terminal.html .. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_recwarn.py .. _`unittest`: unittest.html .. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_monkeypatch.py @@ -15,7 +14,6 @@ .. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_nose.py .. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html -.. _`xdist`: xdist.html .. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_pastebin.py .. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_tmpdir.py .. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_figleaf.py @@ -33,15 +31,14 @@ .. _`figleaf`: figleaf.html .. _`customize`: ../customize.html .. _`hooklog`: hooklog.html -.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_terminal.py .. _`recwarn`: recwarn.html +.. _`xdist`: xdist.html .. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_pdb.py .. _`monkeypatch`: monkeypatch.html .. _`coverage`: coverage.html .. _`resultlog`: resultlog.html .. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_junitxml.py .. _`django`: django.html -.. _`xmlresult`: xmlresult.html .. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_unittest.py .. _`nose`: nose.html .. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_resultlog.py --- a/doc/test/plugin/index.txt +++ b/doc/test/plugin/index.txt @@ -8,7 +8,7 @@ mark_ generic mechanism for marking pyth pdb_ interactive debugging with the Python Debugger. -figleaf_ report test coverage using the 'figleaf' package. +figleaf_ (external) report test coverage using the 'figleaf' package. coverage_ (external) for testing with Ned's coverage module @@ -21,34 +21,26 @@ recwarn_ helpers for asserting deprecati tmpdir_ provide temporary directories to test functions. -other testing domains, misc -=========================== +distributed testing, CI and deployment +====================================== -oejskit_ (external) run javascript tests in real life browsers - -django_ (external) for testing django applications - -xdist_ loop on failing tests, distribute test runs to CPUs and hosts. - -genscript_ generate standalone test script to be distributed along with an application. - - -reporting and failure logging -============================= +xdist_ (external) loop on failing tests, distribute test runs to CPUs and hosts. pastebin_ submit failure or test session information to a pastebin service. junitxml_ logging of test results in JUnit-XML format, for use with Hudson -xmlresult_ (external) for generating xml reports and CruiseControl integration - resultlog_ non-xml machine-readable logging of test results. -terminal_ Implements terminal reporting of the full testing process. +genscript_ generate standalone test script to be distributed along with an application. -other testing conventions -========================= +testing domains and conventions +=============================== + +oejskit_ (external) run javascript tests in real life browsers + +django_ (external) for testing django applications unittest_ automatically discover and run traditional "unittest.py" style tests. 
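The junitxml tests earlier in this series inspect the generated report with a DOM parser; a tiny sketch of that style of post-processing, assuming a run was made with ``--junitxml=junit-report.xml`` (the file name here is made up)::

    from xml.dom import minidom

    dom = minidom.parse("junit-report.xml")
    suite = dom.getElementsByTagName("testsuite")[0]
    print ("%s tests, %s failures, %s skips" % (
        suite.getAttribute("tests"),
        suite.getAttribute("failures"),
        suite.getAttribute("skips")))
    for case in suite.getElementsByTagName("testcase"):
        print ("%s.%s" % (case.getAttribute("classname"),
                          case.getAttribute("name")))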
--- a/doc/test/features.txt
+++ b/doc/test/features.txt
@@ -9,22 +9,40 @@ py.test feature overview
 mature command line testing tool
 ====================================================
-py.test is a command line tool to collect and run automated tests. It
-runs well on Linux, Windows and OSX Python 2.4 through to 2.6 versions.
-It can distribute a single test run to multiple machines. It is used in
-many projects, ranging from running 10 thousands of tests integrated
-with buildbot to a few inlined tests on a command line script.
+py.test is a command line tool to collect, run and report about automated tests. It runs well on Linux, Windows and OSX and on Python 2.4 through to 3.1 versions.
+It is used in many projects, ranging from running tens of thousands of tests
+to a few inlined tests on a command line script. As of version 1.2 you can also
+generate a no-dependency py.test-equivalent standalone script that you
+can distribute along with your application.
-.. _`autocollect`:
+extensive easy plugin system
+======================================================
-automatically collects and executes tests
-===============================================
+.. _`surprisingly easy`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html
-py.test discovers tests automatically by looking at
-specified directories and its files for common
-naming patterns. As ``py.test`` operates as a separate
-cmdline tool you can easily have a command line utility and
-some tests in the same file.
+py.test delegates almost all aspects of its operation to plugins_.
+It is `surprisingly easy`_ to add command line options or
+do other kinds of add-ons and customizations. This can
+be done per-project or by distributing a global plugin.
+One can thus modify or add aspects for purposes such as:
+
+* reporting extensions
+* customizing collection and execution of tests
+* running and managing non-python tests
+* managing domain-specific test state setup
+* adding non-python tests into the run, e.g. driven by data files
+
+.. _`plugins`: plugin/index.html
+
+distributing tests to your CPUs and SSH accounts
+==========================================================
+
+.. _`pytest-xdist`: plugin/xdist.html
+
+Through the use of the separately released `pytest-xdist`_ plugin you
+can seamlessly distribute runs to multiple CPUs or remote computers
+through SSH and sockets. This plugin also offers a ``--looponfailing``
+mode which will continuously re-run only failing tests in a subprocess.
 supports several testing practises and methods
 ==================================================================
@@ -40,9 +58,20 @@ with figleaf`_ or `Javasript unit- and f
 .. _`Javasript unit- and functional testing`: plugin/oejskit.html
 .. _`coverage testing with figleaf`: plugin/figleaf.html
+integrates well with CI systems
+====================================================
+
+py.test can produce JUnitXML style output as well as formatted
+"resultlog" files that can be postprocessed by Continuous Integration
+systems such as Hudson or Buildbot easily. It also provides command
+line options to control test configuration lookup behaviour or ignoring
+certain tests or directories.
+
 no-boilerplate test functions with Python
 ===================================================
+.. _`autocollect`:
+
 automatic Python test discovery
 ------------------------------------
@@ -53,13 +82,18 @@ filename are inspected for finding tests
 * classes with a leading ``Test`` name and ``test`` prefixed methods.
* ``unittest.TestCase`` subclasses -test functions can run with different argument sets ------------------------------------------------------------ +parametrizing test functions and advanced functional testing +-------------------------------------------------------------- py.test offers the unique `funcargs mechanism`_ for setting up and passing project-specific objects to Python test functions. Test Parametrization happens by triggering a call to the same test -functions with different argument values. +function with different argument values. For doing fixtures +using the funcarg mechanism makes your test and setup code +more efficient and more readable. This is especially true +for functional tests which might depend on command line +options and a setup that needs to be shared across +a whole test run. per-test capturing of output, including subprocesses ---------------------------------------------------- @@ -137,30 +171,6 @@ can make use of this feature. .. _`xUnit style setup`: xunit_setup.html .. _`pytest_nose`: plugin/nose.html -load-balance test runs to multiple CPUs -======================================== - -For large test suites you can distribute your -tests to multiple CPUs by issuing for example:: - - py.test -n 3 - -Read more on `distributed testing`_. - -.. _`distributed testing`: dist.html - -ad-hoc run tests cross-platform -================================================== - -py.test supports the sending of tests to -remote ssh-accounts, socket servers. -It can `ad-hoc run your test on multiple -platforms one a single test run`. Ad-hoc -means that there are **no installation -requirements whatsoever** on the remote side. - -.. _`ad-hoc run your test on multiple platforms one a single test run`: dist.html#atonce - advanced test selection and running modes ========================================================= @@ -202,21 +212,6 @@ plugin for more information. .. _`pytest_keyword`: plugin/mark.html -easy to extend -========================================= - -py.test has advanced `extension mechanisms`_ -with a growing `list of default plugins`_. -One can can easily modify or add aspects for for -purposes such as: - -* reporting extensions -* customizing collection and execution of tests -* running and managing non-python tests -* managing domain-specific test state setup - -.. _`list of default plugins`: plugin/index.html -.. _`extension mechanisms`: customize.html#extensions .. _`reStructured Text`: http://docutils.sourceforge.net .. _`Python debugger`: http://docs.python.org/lib/module-pdb.html --- a/doc/test/plugin/xdist.txt +++ b/doc/test/plugin/xdist.txt @@ -26,7 +26,7 @@ program source code to the remote place. are reported back and displayed to your local test session. You may specify different Python versions and interpreters. -.. _`pytest-xdist`: http://pytest.org/plugin/xdist.html +.. 
_`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist Usage examples --------------------- --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -6,12 +6,10 @@ plugins = [ ('advanced python testing', 'skipping mark pdb figleaf coverage ' 'monkeypatch capture recwarn tmpdir',), - ('other testing domains, misc', - 'oejskit django xdist genscript'), - ('reporting and failure logging', - 'pastebin junitxml xmlresult resultlog terminal',), - ('other testing conventions', - 'unittest nose doctest restdoc'), + ('distributed testing, CI and deployment', + 'xdist pastebin junitxml resultlog genscript',), + ('testing domains and conventions', + 'oejskit django unittest nose doctest restdoc'), ('core debugging / help functionality', 'helpconfig hooklog') #('internal plugins / core functionality', @@ -22,6 +20,8 @@ plugins = [ externals = { 'oejskit': "run javascript tests in real life browsers", + 'xdist': None, + 'figleaf': None, 'django': "for testing django applications", 'coverage': "for testing with Ned's coverage module ", 'xmlresult': "for generating xml reports " @@ -143,7 +143,10 @@ class PluginOverview(RestWriter): doc = PluginDoc(docpath) doc.make(config=config, name=name) self.add_internal_link(name, doc.target) - self.para("%s_ %s" %(name, doc.oneliner)) + if name in externals: + self.para("%s_ (external) %s" %(name, doc.oneliner)) + else: + self.para("%s_ %s" %(name, doc.oneliner)) self.Print() class HookSpec(RestWriter): From commits-noreply at bitbucket.org Mon Jan 18 16:49:04 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 15:49:04 +0000 (UTC) Subject: [py-svn] py-trunk commit 1d5d94e079b1: make sure we get an absolute path when writing the genscript file Message-ID: <20100118154904.B10DC7EF29@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263829700 -3600 # Node ID 1d5d94e079b11da790c3bc043fd90b1d5d4fb6b1 # Parent 357014f9745a4d5cf6698e45085c938f8ace8180 make sure we get an absolute path when writing the genscript file --- a/testing/plugin/test_pytest_genscript.py +++ b/testing/plugin/test_pytest_genscript.py @@ -7,9 +7,10 @@ def pytest_funcarg__standalone(request): class Standalone: def __init__(self, request): self.testdir = request.getfuncargvalue("testdir") - self.script = self.testdir.tmpdir.join("mypytest") - result = self.testdir.runpytest("--genscript=%s" % self.script) + script = "mypytest" + result = self.testdir.runpytest("--genscript=%s" % script) assert result.ret == 0 + self.script = self.testdir.tmpdir.join(script) assert self.script.check() def run(self, anypython, testdir, *args): --- a/py/_plugin/pytest_genscript.py +++ b/py/_plugin/pytest_genscript.py @@ -25,13 +25,15 @@ def pytest_configure(config): mydir = py.path.local(__file__).dirpath() infile = mydir.join("standalonetemplate.py") pybasedir = py.path.local(py.__file__).dirpath().dirpath() + genscript = py.path.local(genscript) main(pybasedir, outfile=genscript, infile=infile) raise SystemExit(0) def main(pybasedir, outfile, infile): - os.chdir(str(pybasedir)) outfile = str(outfile) infile = str(infile) + assert os.path.isabs(outfile) + os.chdir(str(pybasedir)) files = [] for dirpath, dirnames, filenames in os.walk("py"): for f in filenames: From commits-noreply at bitbucket.org Mon Jan 18 16:49:05 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 15:49:05 +0000 (UTC) Subject: 
[py-svn] py-trunk commit 357014f9745a: add terminal plugin to overview page again Message-ID: <20100118154905.CCF0C7EF1C@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263828386 -3600 # Node ID 357014f9745a4d5cf6698e45085c938f8ace8180 # Parent be113b5eb5f9a58e07c1007849571f3edfa7ee8b add terminal plugin to overview page again --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -1,4 +1,5 @@ .. _`helpconfig`: helpconfig.html +.. _`terminal`: terminal.html .. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_recwarn.py .. _`unittest`: unittest.html .. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_monkeypatch.py @@ -14,6 +15,7 @@ .. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_nose.py .. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html +.. _`xdist`: xdist.html .. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_pastebin.py .. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_tmpdir.py .. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_figleaf.py @@ -31,8 +33,8 @@ .. _`figleaf`: figleaf.html .. _`customize`: ../customize.html .. _`hooklog`: hooklog.html +.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_terminal.py .. _`recwarn`: recwarn.html -.. _`xdist`: xdist.html .. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_pdb.py .. _`monkeypatch`: monkeypatch.html .. _`coverage`: coverage.html --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -10,8 +10,8 @@ plugins = [ 'xdist pastebin junitxml resultlog genscript',), ('testing domains and conventions', 'oejskit django unittest nose doctest restdoc'), - ('core debugging / help functionality', - 'helpconfig hooklog') + ('internal, debugging, help functionality', + 'helpconfig terminal hooklog') #('internal plugins / core functionality', # #'runner execnetcleanup # pytester', # 'runner execnetcleanup' # pytester', --- a/doc/test/plugin/index.txt +++ b/doc/test/plugin/index.txt @@ -51,11 +51,13 @@ doctest_ collect and execute doctests fr restdoc_ perform ReST syntax, local and remote reference tests on .rst/.txt files. -core debugging / help functionality -=================================== +internal, debugging, help functionality +======================================= helpconfig_ provide version info, conftest/environment config names. +terminal_ Implements terminal reporting of the full testing process. + hooklog_ log invocations of extension hooks to a file. 
From commits-noreply at bitbucket.org Mon Jan 18 17:05:31 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 16:05:31 +0000 (UTC) Subject: [py-svn] py-trunk commit 4fc5212f7626: add report_header_info to release announce Message-ID: <20100118160531.6722B7EF16@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263830422 -3600 # Node ID 4fc5212f7626a56b9eb6437b5c673f56dd7eb942 # Parent 1d5d94e079b11da790c3bc043fd90b1d5d4fb6b1 add report_header_info to release announce and remove some ignores/excludes from MANIFEST.in for a less verbose installation experience --- a/doc/announce/release-1.2.0.txt +++ b/doc/announce/release-1.2.0.txt @@ -14,16 +14,17 @@ py.test 1.2.0 brings many bug fixes and * --genscript=path creates a standalone py.test-equivalent test-script * --ignore=path prevents collection of anything below that path * --confcutdir=path only lookup conftest.py test configs below that path -* a new "pytestconfig" function argument gives direct access to option values -* parametrized tests can now be specified per-class as well -* on CPython py.test additionally installs as "py.test-VERSION" +* a 'pytest_report_header' hook to add info to the terminal report header +* a 'pytestconfig' function argument gives direct access to option values +* 'pytest_generate_tests' can now be put into a class as well +* on CPython py.test additionally installs as "py.test-VERSION", on + Jython as py.test-jython and on PyPy as py.test-pypy-XYZ -Apart from many bug fixes 1.2.0 also has better pluginization. -Distributed testing and looponfailing testing have been moved -out into its own "pytest-xdist" plugin which can be installed separately. -The same is true for "pytest-figleaf" for doing coverage reporting. -Those can also serve well now as blue prints for doing your own. -separately released plugins. +Apart from many bug fixes 1.2.0 also has better pluginization: +Distributed testing and looponfailing testing now live in the +separately installable 'pytest-xdist' plugin. The same is true for +'pytest-figleaf' for doing coverage reporting. Those two plugins +can serve well now as blue prints for doing your own. 
thanks to all who helped and gave feedback, have fun, --- a/MANIFEST.in +++ b/MANIFEST.in @@ -8,11 +8,11 @@ graft doc graft contrib graft bin graft testing -exclude *.orig -exclude *.rej +#exclude *.orig +#exclude *.rej exclude .hginore -exclude *.pyc -recursive-exclude testing *.pyc *.orig *.rej *$py.class -prune .pyc -prune .svn -prune .hg +#exclude *.pyc +#recursive-exclude testing *.pyc *.orig *.rej *$py.class +#prune .pyc +#prune .svn +#prune .hg From commits-noreply at bitbucket.org Mon Jan 18 17:05:33 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 16:05:33 +0000 (UTC) Subject: [py-svn] py-trunk commit dc19b0f7b95c: Added tag 1.2.0 for changeset 4fc5212f7626 Message-ID: <20100118160533.69D2A7EF1E@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263830692 -3600 # Node ID dc19b0f7b95c13bac189fd8abf3fdd82262e1fd5 # Parent 4fc5212f7626a56b9eb6437b5c673f56dd7eb942 Added tag 1.2.0 for changeset 4fc5212f7626 --- a/.hgtags +++ b/.hgtags @@ -20,3 +20,4 @@ 6bd221981ac99103002c1cb94fede400d23a96a1 4816e8b80602a3fd3a0a120333ad85fbe7d8bab4 1.0.2 60c44bdbf093285dc69d5462d4dbb4acad325ca6 1.1.0 319187fcda66714c5eb1353492babeec3d3c826f 1.1.1 +4fc5212f7626a56b9eb6437b5c673f56dd7eb942 1.2.0 From commits-noreply at bitbucket.org Tue Jan 19 00:06:00 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 23:06:00 +0000 (UTC) Subject: [py-svn] apipkg commit 4e25c6a58cb6: add MANIFEST.in and move readme.txt to capital letter, bump version. Message-ID: <20100118230600.7DDE47EF28@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1263855938 -3600 # Node ID 4e25c6a58cb618bdcd061289dd66b3ceb11a495a # Parent c76615baa4a26f89adb1c52b26f401b35001006d add MANIFEST.in and move readme.txt to capital letter, bump version. --- a/readme.txt +++ /dev/null @@ -1,80 +0,0 @@ -Welcome to apipkg! ------------------------- - -With apipkg you can control the exported namespace of a -python package and greatly reduce the number of imports for your users. -It is a `small pure python module`_ that works on virtually all Python -versions, including CPython2.3 to Python3.1, Jython and PyPy. It co-operates -well with Python's ``help()`` system, custom importers (PEP302) and common -command line completion tools. - -Usage is very simple: you can require 'apipkg' as a dependency or you -can copy paste the <100 Lines of code into your project. - -Tutorial example -------------------- - -Here is a simple ``mypkg`` package that specifies one namespace -and exports two objects imported from different modules:: - - # mypkg/__init__.py - import apipkg - apipkg.initpkg(__name__, { - 'path': { - 'Class1': "_mypkg.somemodule:Class1", - 'Class2': "_mypkg.othermodule:Class2", - } - } - -The package is initialized with a dictionary as namespace. - -You need to create a ``_mypkg`` package with a ``somemodule.py`` -and ``othermodule.py`` containing the respective classes. -The ``_mypkg`` is not special - it's a completely -regular python package. - -Namespace dictionaries contain ``name: value`` mappings -where the value may be another namespace dictionary or -a string specifying an import location. 
On accessing -an namespace attribute an import will be performed:: - - >>> import mypkg - >>> mypkg.path - - >>> mypkg.sub.Class1 # '_mypkg.somemodule' gets imported now - - >>> mypkg.sub.Class2 # '_mypkg.othermodule' gets imported now - - -The ``mypkg.sub`` namespace and both its classes are -lazy loaded. Note that **no imports apart from the root -'import mypkg' is required**. This means that whoever -uses your Api only ever needs this one import. Of course -you can still use the import statement like so:: - - from mypkg.sub import Class1 - - -Including apipkg in your package --------------------------------------- - -If you don't want to add an ``apipkg`` dependency to your package you -can copy the `apipkg.py`_ file somewhere to your own package, -for example ``_mypkg/apipkg.py`` in the above example. You -then import the ``initpkg`` function from that new place and -are good to go. - -.. _`small pure python module`: -.. _`apipkg.py`: http://bitbucket.org/hpk42/apipkg/src/tip/apipkg.py - -Feedback? ------------------------ - -If you have questions you are welcome to - -* join the #pylib channel on irc.freenode.net -* subscribe to the http://codespeak.net/mailman/listinfo/py-dev list. -* create an issue on http://bitbucket.org/hpk42/apipkg/issues - -have fun, -holger krekel --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ def main(): name='apipkg', description= 'apipkg: namespace control and lazy-import mechanism', - long_description = open('readme.txt').read(), + long_description = open('README.txt').read(), version= __version__, url='http://bitbucket.org/hpk42/apipkg', license='MIT License', --- a/apipkg.py +++ b/apipkg.py @@ -8,7 +8,7 @@ see http://pypi.python.org/pypi/apipkg import sys from types import ModuleType -__version__ = "1.0b4" +__version__ = "1.0b5" def initpkg(pkgname, exportdefs): """ initialize given package from the export definitions. """ --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,7 @@ +include CHANGELOG +include README.txt +include setup.py +include LICENSE +include test_apipkg.py +prune .svn +prune .hg --- /dev/null +++ b/README.txt @@ -0,0 +1,80 @@ +Welcome to apipkg! +------------------------ + +With apipkg you can control the exported namespace of a +python package and greatly reduce the number of imports for your users. +It is a `small pure python module`_ that works on virtually all Python +versions, including CPython2.3 to Python3.1, Jython and PyPy. It co-operates +well with Python's ``help()`` system, custom importers (PEP302) and common +command line completion tools. + +Usage is very simple: you can require 'apipkg' as a dependency or you +can copy paste the <100 Lines of code into your project. + +Tutorial example +------------------- + +Here is a simple ``mypkg`` package that specifies one namespace +and exports two objects imported from different modules:: + + # mypkg/__init__.py + import apipkg + apipkg.initpkg(__name__, { + 'path': { + 'Class1': "_mypkg.somemodule:Class1", + 'Class2': "_mypkg.othermodule:Class2", + } + } + +The package is initialized with a dictionary as namespace. + +You need to create a ``_mypkg`` package with a ``somemodule.py`` +and ``othermodule.py`` containing the respective classes. +The ``_mypkg`` is not special - it's a completely +regular python package. + +Namespace dictionaries contain ``name: value`` mappings +where the value may be another namespace dictionary or +a string specifying an import location. 
On accessing +an namespace attribute an import will be performed:: + + >>> import mypkg + >>> mypkg.path + + >>> mypkg.sub.Class1 # '_mypkg.somemodule' gets imported now + + >>> mypkg.sub.Class2 # '_mypkg.othermodule' gets imported now + + +The ``mypkg.sub`` namespace and both its classes are +lazy loaded. Note that **no imports apart from the root +'import mypkg' is required**. This means that whoever +uses your Api only ever needs this one import. Of course +you can still use the import statement like so:: + + from mypkg.sub import Class1 + + +Including apipkg in your package +-------------------------------------- + +If you don't want to add an ``apipkg`` dependency to your package you +can copy the `apipkg.py`_ file somewhere to your own package, +for example ``_mypkg/apipkg.py`` in the above example. You +then import the ``initpkg`` function from that new place and +are good to go. + +.. _`small pure python module`: +.. _`apipkg.py`: http://bitbucket.org/hpk42/apipkg/src/tip/apipkg.py + +Feedback? +----------------------- + +If you have questions you are welcome to + +* join the #pylib channel on irc.freenode.net +* subscribe to the http://codespeak.net/mailman/listinfo/py-dev list. +* create an issue on http://bitbucket.org/hpk42/apipkg/issues + +have fun, +holger krekel From commits-noreply at bitbucket.org Tue Jan 19 00:06:02 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 23:06:02 +0000 (UTC) Subject: [py-svn] apipkg commit 976aa691cfae: Added tag 1.0.0b5 for changeset 4e25c6a58cb6 Message-ID: <20100118230602.B86957EF2B@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1263855944 -3600 # Node ID 976aa691cfaeeb9793057d09ae9f9e6fd162be69 # Parent 4e25c6a58cb618bdcd061289dd66b3ceb11a495a Added tag 1.0.0b5 for changeset 4e25c6a58cb6 --- /dev/null +++ b/.hgtags @@ -0,0 +1,1 @@ +4e25c6a58cb618bdcd061289dd66b3ceb11a495a 1.0.0b5 From commits-noreply at bitbucket.org Tue Jan 19 00:07:34 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 23:07:34 +0000 (UTC) Subject: [py-svn] apipkg commit 47e2df93097a: no license file there Message-ID: <20100118230734.1A41B7EF28@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1263856044 -3600 # Node ID 47e2df93097a5dc5cfcf978a754342ba47bab8c3 # Parent d7b373136b6ca9ff69480e4009cfa5510cad0930 no license file there --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,7 +1,6 @@ include CHANGELOG include README.txt include setup.py -include LICENSE include test_apipkg.py prune .svn prune .hg From commits-noreply at bitbucket.org Tue Jan 19 00:07:35 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Mon, 18 Jan 2010 23:07:35 +0000 (UTC) Subject: [py-svn] apipkg commit d7b373136b6c: Added tag 1.0b5 for changeset 976aa691cfae Message-ID: <20100118230735.BAB0A7EF2B@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1263856026 -3600 # Node ID d7b373136b6ca9ff69480e4009cfa5510cad0930 # Parent 976aa691cfaeeb9793057d09ae9f9e6fd162be69 Added tag 1.0b5 for changeset 976aa691cfae --- a/.hgtags +++ b/.hgtags @@ -1,1 +1,2 @@ 4e25c6a58cb618bdcd061289dd66b3ceb11a495a 1.0.0b5 +976aa691cfaeeb9793057d09ae9f9e6fd162be69 1.0b5 From commits-noreply at bitbucket.org 
Tue Jan 19 10:35:04 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Tue, 19 Jan 2010 09:35:04 +0000 (UTC) Subject: [py-svn] py-trunk commit 0421cc33ce8f: better default for bogus terminal getdimensions() call, fixes issue63 Message-ID: <20100119093504.6A8B583860@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1263893681 -3600 # Node ID 0421cc33ce8f9ab4c0f3bd2796f4f6b707fb443d # Parent dc19b0f7b95c13bac189fd8abf3fdd82262e1fd5 better default for bogus terminal getdimensions() call, fixes issue63 --- a/testing/io_/test_terminalwriter.py +++ b/testing/io_/test_terminalwriter.py @@ -8,14 +8,20 @@ def test_terminal_width_COLUMNS(monkeypa fcntl = py.test.importorskip("fcntl") monkeypatch.setattr(fcntl, 'ioctl', lambda *args: int('x')) monkeypatch.setenv('COLUMNS', '42') - assert terminalwriter.get_terminal_width() == 41 + assert terminalwriter.get_terminal_width() == 42 monkeypatch.delenv('COLUMNS', raising=False) def test_terminalwriter_defaultwidth_80(monkeypatch): monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: 0/0) monkeypatch.delenv('COLUMNS', raising=False) tw = py.io.TerminalWriter() - assert tw.fullwidth == 80-1 + assert tw.fullwidth == 80 + +def test_terminalwriter_getdimensions_bogus(monkeypatch): + monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (10,10)) + monkeypatch.delenv('COLUMNS', raising=False) + tw = py.io.TerminalWriter() + assert tw.fullwidth == 80 def test_terminalwriter_computes_width(monkeypatch): monkeypatch.setattr(terminalwriter, 'get_terminal_width', lambda: 42) --- a/py/_io/terminalwriter.py +++ b/py/_io/terminalwriter.py @@ -74,9 +74,11 @@ def get_terminal_width(): raise except: # FALLBACK - width = int(os.environ.get('COLUMNS', 80))-1 - # XXX the windows getdimensions may be bogus, let's sanify a bit - width = max(width, 40) # we alaways need 40 chars + width = int(os.environ.get('COLUMNS', 80)) + else: + # XXX the windows getdimensions may be bogus, let's sanify a bit + if width < 40: + width = 80 return width terminal_width = get_terminal_width() --- a/CHANGELOG +++ b/CHANGELOG @@ -1,4 +1,9 @@ -Changes between 1.X and 1.1.1 +Changes between 1.2.1 and 1.2.0 +===================================== + +- fix issue63: assume <40 columns to be a bogus terminal width, default to 80 + +Changes between 1.2 and 1.1.1 ===================================== - moved dist/looponfailing from py.test core into a new From commits-noreply at bitbucket.org Thu Jan 21 19:35:52 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 21 Jan 2010 18:35:52 +0000 (UTC) Subject: [py-svn] py-trunk commit d14f120487dd: fix doc links, bump to dev version Message-ID: <20100121183552.046357EF76@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1264098882 -3600 # Node ID d14f120487dd94101a8c3d659c1b70306c1c7a88 # Parent 0421cc33ce8f9ab4c0f3bd2796f4f6b707fb443d fix doc links, bump to dev version --- a/doc/install.txt +++ b/doc/install.txt @@ -21,6 +21,8 @@ py.test/pylib installation info in a nut **Installed scripts**: see `bin`_ for which and how scripts are installed. +**hg repository**: https://bitbucket.org/hpk42/py-trunk + .. _`bin`: bin.html --- a/py/__init__.py +++ b/py/__init__.py @@ -8,7 +8,7 @@ dictionary or an import path. 
(c) Holger Krekel and others, 2004-2010 """ -version = "1.2.0" +version = "1.2.0post1" __version__ = version = version or "1.2.x" import py.apipkg --- a/CHANGELOG +++ b/CHANGELOG @@ -2,6 +2,7 @@ Changes between 1.2.1 and 1.2.0 ===================================== - fix issue63: assume <40 columns to be a bogus terminal width, default to 80 +- fix plugin links Changes between 1.2 and 1.1.1 ===================================== --- a/doc/test/plugin/links.txt +++ b/doc/test/plugin/links.txt @@ -1,47 +1,47 @@ .. _`helpconfig`: helpconfig.html .. _`terminal`: terminal.html -.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_recwarn.py +.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_recwarn.py .. _`unittest`: unittest.html -.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_monkeypatch.py -.. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_genscript.py +.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_monkeypatch.py +.. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_genscript.py .. _`pastebin`: pastebin.html .. _`skipping`: skipping.html .. _`genscript`: genscript.html .. _`plugins`: index.html .. _`mark`: mark.html .. _`tmpdir`: tmpdir.html -.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_doctest.py +.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_doctest.py .. _`capture`: capture.html -.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_nose.py -.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_restdoc.py +.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_nose.py +.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html .. _`xdist`: xdist.html -.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_pastebin.py -.. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_tmpdir.py -.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_figleaf.py -.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_hooklog.py +.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_pastebin.py +.. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_tmpdir.py +.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_figleaf.py +.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_hooklog.py .. _`junitxml`: junitxml.html -.. _`plugin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/plugin.py -.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_skipping.py +.. _`plugin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/plugin.py +.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_skipping.py .. _`checkout the py.test development version`: ../../install.html#checkout -.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_helpconfig.py +.. 
_`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_helpconfig.py .. _`oejskit`: oejskit.html .. _`doctest`: doctest.html -.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_mark.py +.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_mark.py .. _`get in contact`: ../../contact.html -.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_capture.py +.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_capture.py .. _`figleaf`: figleaf.html .. _`customize`: ../customize.html .. _`hooklog`: hooklog.html -.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_terminal.py +.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_terminal.py .. _`recwarn`: recwarn.html -.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_pdb.py +.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_pdb.py .. _`monkeypatch`: monkeypatch.html .. _`coverage`: coverage.html .. _`resultlog`: resultlog.html -.. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_junitxml.py +.. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_junitxml.py .. _`django`: django.html -.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_unittest.py +.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_unittest.py .. _`nose`: nose.html -.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/plugin/pytest_resultlog.py +.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.2.0/py/_plugin/pytest_resultlog.py .. 
_`pdb`: pdb.html --- a/setup.py +++ b/setup.py @@ -27,7 +27,7 @@ def main(): name='py', description='py.test and pylib: rapid testing and development utils.', long_description = long_description, - version= trunk or '1.2.0', + version= trunk or '1.2.0post1', url='http://pylib.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], --- a/bin-for-dist/makepluginlist.py +++ b/bin-for-dist/makepluginlist.py @@ -217,7 +217,7 @@ class PluginDoc(RestWriter): # "py/test/plugin/%s" %(hg_changeset, basename))) self.links.append((basename, "http://bitbucket.org/hpk42/py-trunk/raw/%s/" - "py/plugin/%s" %(pyversion, basename))) + "py/_plugin/%s" %(pyversion, basename))) self.links.append(('customize', '../customize.html')) self.links.append(('plugins', 'index.html')) self.links.append(('get in contact', '../../contact.html')) From commits-noreply at bitbucket.org Thu Jan 21 20:01:26 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 21 Jan 2010 19:01:26 +0000 (UTC) Subject: [py-svn] apipkg commit 8c7bb85c04f1: fix recursive import error Message-ID: <20100121190126.91D017EF77@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1264099537 -3600 # Node ID 8c7bb85c04f17cf3de710d7323ebe852d64ac8e5 # Parent 47e2df93097a5dc5cfcf978a754342ba47bab8c3 fix recursive import error --- a/test_apipkg.py +++ b/test_apipkg.py @@ -105,6 +105,24 @@ class TestScenarios: assert mymodule.__doc__ == 'hello' assert mymodule.y.z == 3 + def test_recursive_import(self, monkeypatch, tmpdir): + pkgdir = tmpdir.mkdir("recmodule") + pkgdir.join('__init__.py').write(py.code.Source(""" + import apipkg + apipkg.initpkg(__name__, exportdefs={ + 'some': '.submod:someclass', + }) + """)) + pkgdir.join('submod.py').write(py.code.Source(""" + import recmodule + class someclass: pass + print (recmodule.__dict__) + """)) + monkeypatch.syspath_prepend(tmpdir) + import recmodule + assert isinstance(recmodule, apipkg.ApiModule) + assert recmodule.some.__name__ == "someclass" + def xtest_nested_absolute_imports(): import email api_email = apipkg.ApiModule('email',{ --- a/apipkg.py +++ b/apipkg.py @@ -8,7 +8,7 @@ see http://pypi.python.org/pypi/apipkg import sys from types import ModuleType -__version__ = "1.0b5" +__version__ = "1.0b6" def initpkg(pkgname, exportdefs): """ initialize given package from the export definitions. """ @@ -71,7 +71,10 @@ class ApiModule(ModuleType): else: result = importobj(modpath, attrname) setattr(self, name, result) - del self.__map__[name] + try: + del self.__map__[name] + except KeyError: + pass # in a recursive-import situation a double-del can happen return result __getattr__ = __makeattr --- a/CHANGELOG +++ b/CHANGELOG @@ -1,5 +1,18 @@ +1.0.0b6 +---------------------------------------- + +- fix recursive import issue resulting in a superflous KeyError + +1.0.0b5 +---------------------------------------- + +- fixed MANIFEST.in +- also transfer __loader__ attribute (thanks Ralf Schmitt) +- compat fix for BPython + 1.0.0b3 (compared to 1.0.0b2) ------------------------------------ - added special __onfirstaccess__ attribute whose value will be called on the first attribute access of an apimodule. 
+ From commits-noreply at bitbucket.org Thu Jan 21 20:01:28 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 21 Jan 2010 19:01:28 +0000 (UTC) Subject: [py-svn] apipkg commit 6cb3976c8d8a: default to __version__ '0' and not set __loader__ or __path__ at all if it Message-ID: <20100121190128.4CC097EF78@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project apipkg # URL http://bitbucket.org/hpk42/apipkg/overview/ # User holger krekel # Date 1264100470 -3600 # Node ID 6cb3976c8d8aac3d332ed8f507cfdad34b4279a1 # Parent 8c7bb85c04f17cf3de710d7323ebe852d64ac8e5 default to __version__ '0' and not set __loader__ or __path__ at all if it doesn't exist on the underlying init module --- a/test_apipkg.py +++ b/test_apipkg.py @@ -196,13 +196,24 @@ def test_initpkg_transfers_attrs(monkeyp assert newmod.__version__ == mod.__version__ assert newmod.__loader__ == mod.__loader__ +def test_initpkg_not_transfers_not_existing_attrs(monkeypatch): + mod = type(sys)('hello') + mod.__file__ = "hello.py" + monkeypatch.setitem(sys.modules, 'hello', mod) + apipkg.initpkg('hello', {}) + newmod = sys.modules['hello'] + assert newmod != mod + assert newmod.__file__ == mod.__file__ + assert not hasattr(newmod, '__loader__') + assert not hasattr(newmod, '__path__') + def test_initpkg_defaults(monkeypatch): mod = type(sys)('hello') monkeypatch.setitem(sys.modules, 'hello', mod) apipkg.initpkg('hello', {}) newmod = sys.modules['hello'] assert newmod.__file__ == None - assert newmod.__version__ == None + assert newmod.__version__ == '0' def test_name_attribute(): api = apipkg.ApiModule('name_test', { --- a/apipkg.py +++ b/apipkg.py @@ -15,9 +15,10 @@ def initpkg(pkgname, exportdefs): mod = ApiModule(pkgname, exportdefs, implprefix=pkgname) oldmod = sys.modules[pkgname] mod.__file__ = getattr(oldmod, '__file__', None) - mod.__version__ = getattr(oldmod, '__version__', None) - mod.__path__ = getattr(oldmod, '__path__', None) - mod.__loader__ = getattr(oldmod, '__loader__', None) + mod.__version__ = getattr(oldmod, '__version__', '0') + for name in ('__path__', '__loader__'): + if hasattr(oldmod, name): + setattr(mod, name, getattr(oldmod, name)) sys.modules[pkgname] = mod def importobj(modpath, attrname): --- a/CHANGELOG +++ b/CHANGELOG @@ -2,6 +2,8 @@ 1.0.0b6 ---------------------------------------- - fix recursive import issue resulting in a superflous KeyError +- default to __version__ '0' and not set __loader__ or __path__ at all if it + doesn't exist on the underlying init module 1.0.0b5 ---------------------------------------- From commits-noreply at bitbucket.org Thu Jan 21 20:07:25 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 21 Jan 2010 19:07:25 +0000 (UTC) Subject: [py-svn] py-trunk commit c93eada6c865: upgrade apipkg.py to fix a potential recursive import issue Message-ID: <20100121190725.4631B7EF77@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1264100810 -3600 # Node ID c93eada6c8658c505d50803aa3541aa6f278897b # Parent d14f120487dd94101a8c3d659c1b70306c1c7a88 upgrade apipkg.py to fix a potential recursive import issue --- a/py/apipkg.py +++ b/py/apipkg.py @@ -8,16 +8,17 @@ see http://pypi.python.org/pypi/apipkg import sys from types import ModuleType -__version__ = "1.0b4" +__version__ = "1.0b6" def initpkg(pkgname, exportdefs): """ initialize given package from the export definitions. 
""" mod = ApiModule(pkgname, exportdefs, implprefix=pkgname) oldmod = sys.modules[pkgname] mod.__file__ = getattr(oldmod, '__file__', None) - mod.__version__ = getattr(oldmod, '__version__', None) - mod.__path__ = getattr(oldmod, '__path__', None) - mod.__loader__ = getattr(oldmod, '__loader__', None) + mod.__version__ = getattr(oldmod, '__version__', '0') + for name in ('__path__', '__loader__'): + if hasattr(oldmod, name): + setattr(mod, name, getattr(oldmod, name)) sys.modules[pkgname] = mod def importobj(modpath, attrname): @@ -71,7 +72,10 @@ class ApiModule(ModuleType): else: result = importobj(modpath, attrname) setattr(self, name, result) - del self.__map__[name] + try: + del self.__map__[name] + except KeyError: + pass # in a recursive-import situation a double-del can happen return result __getattr__ = __makeattr --- a/CHANGELOG +++ b/CHANGELOG @@ -2,6 +2,8 @@ Changes between 1.2.1 and 1.2.0 ===================================== - fix issue63: assume <40 columns to be a bogus terminal width, default to 80 +- update apipkg.py to fix an issue where recursive imports might + unnecessarily break importing - fix plugin links Changes between 1.2 and 1.1.1 From commits-noreply at bitbucket.org Thu Jan 21 23:29:43 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 21 Jan 2010 22:29:43 +0000 (UTC) Subject: [py-svn] py-trunk commit 13115a130ed6: fail doc generation if pygments is not installed Message-ID: <20100121222943.18AE97EF70@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1264112673 -3600 # Node ID 13115a130ed644130ed55def87838b8c1fa23d72 # Parent c93eada6c8658c505d50803aa3541aa6f278897b fail doc generation if pygments is not installed --- a/ISSUES.txt +++ b/ISSUES.txt @@ -107,3 +107,4 @@ Also consider implementing py.test --fun show available funcargs - it should honour the path::TestClass syntax so one can easily inspect where funcargs come from or which are available. + --- a/doc/conftest.py +++ b/doc/conftest.py @@ -3,3 +3,7 @@ import py #py.test.importorskip("pygments") pytest_plugins = ['pytest_restdoc'] collect_ignore = ['test/attic.txt'] + +def pytest_runtest_setup(item): + if item.fspath.ext == ".txt": + import pygments # for raising an error From commits-noreply at bitbucket.org Wed Jan 27 12:12:58 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 27 Jan 2010 11:12:58 +0000 (UTC) Subject: [py-svn] py-trunk commit f0035a36f714: fix issue78 - now python-level teardown functions are now called even if the setup failed. Message-ID: <20100127111258.1836A7EF32@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1264590570 -3600 # Node ID f0035a36f714f9d06da65162a50a81a1e1947363 # Parent 13115a130ed644130ed55def87838b8c1fa23d72 fix issue78 - now python-level teardown functions are now called even if the setup failed. Important detail: if the setup raises a Skipped exception, teardown will not be called. This helps to avoid breaking setup_module/class that performs a skip - it would otherwise internally be considered as a "successful" setup in order to have teardown called later. I guess it also makes sense to treat Skip specially because it is unlikely a teardown should be called if a Skip was raised on setup. In any case, failing setups and teardowns will be reported separately. 
--- a/testing/plugin/test_pytest_runner_xunit.py +++ b/testing/plugin/test_pytest_runner_xunit.py @@ -134,3 +134,38 @@ def test_method_setup_uses_fresh_instanc """) reprec.assertoutcome(passed=2, failed=0) +def test_failing_setup_calls_teardown(testdir): + p = testdir.makepyfile(""" + def setup_module(mod): + raise ValueError(42) + def test_function(): + assert 0 + def teardown_module(mod): + raise ValueError(43) + """) + result = testdir.runpytest(p) + result.stdout.fnmatch_lines([ + "*42*", + "*43*", + "*2 error*" + ]) + +def test_setup_that_skips_calledagain_and_no_teardown(testdir): + p = testdir.makepyfile(""" + import py + def setup_module(mod): + py.test.skip("x") + def test_function1(): + pass + def test_function2(): + pass + def teardown_module(mod): + raise ValueError(43) + """) + result = testdir.runpytest(p) + result.stdout.fnmatch_lines([ + "*2 skipped*", + ]) + assert "43" not in result.stdout.str() + + --- a/py/_plugin/pytest_runner.py +++ b/py/_plugin/pytest_runner.py @@ -249,5 +249,9 @@ class SetupState(object): break self._pop_and_teardown() for col in needed_collectors[len(self.stack):]: - col.setup() self.stack.append(col) + try: + col.setup() + except Skipped: + self.stack.pop() + raise --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,9 @@ Changes between 1.2.1 and 1.2.0 ===================================== +- fix issue78: always call python-level teardown functions even if the + according setup failed - but make sure that setup is called repeatedly + and no teardown if the setup raises a Skipped (as sone by py.test.skip()). - fix issue63: assume <40 columns to be a bogus terminal width, default to 80 - update apipkg.py to fix an issue where recursive imports might unnecessarily break importing From commits-noreply at bitbucket.org Wed Jan 27 12:52:41 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 27 Jan 2010 11:52:41 +0000 (UTC) Subject: [py-svn] py-trunk commit f50624ea4488: closes #67 new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value Message-ID: <20100127115241.D18B47EED0@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1264593139 -3600 # Node ID f50624ea4488e43c0d472f1de853e336217368c4 # Parent f0035a36f714f9d06da65162a50a81a1e1947363 closes #67 new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value --- a/testing/plugin/test_pytest_terminal.py +++ b/testing/plugin/test_pytest_terminal.py @@ -153,6 +153,26 @@ class TestTerminal: assert '--calling--' not in s assert 'IndexError' not in s + def test_tb_crashline(self, testdir, option): + p = testdir.makepyfile(""" + import py + def g(): + raise IndexError + def test_func1(): + print (6*7) + g() # --calling-- + def test_func2(): + assert 0, "hello" + """) + result = testdir.runpytest("--tb=line") + bn = p.basename + result.stdout.fnmatch_lines([ + "*%s:3: IndexError*" % bn, + "*%s:8: AssertionError: hello*" % bn, + ]) + s = result.stdout.str() + assert "def test_func2" not in s + def test_show_path_before_running_test(self, testdir, linecomp): item = testdir.getitem("def test_func(): pass") tr = TerminalReporter(item.config, file=linecomp.stringio) --- a/py/_plugin/pytest_terminal.py +++ b/py/_plugin/pytest_terminal.py @@ -18,8 +18,8 @@ def 
pytest_addoption(parser): help="show more info, valid: skipped,xfailed") group._addoption('--tb', metavar="style", action="store", dest="tbstyle", default='long', - type="choice", choices=['long', 'short', 'no'], - help="traceback verboseness (long/short/no).") + type="choice", choices=['long', 'short', 'no', 'line'], + help="traceback print mode (long/short/line/no).") group._addoption('--fulltrace', action="store_true", dest="fulltrace", default=False, help="don't cut any tracebacks (default is to cut).") @@ -272,15 +272,18 @@ class TerminalReporter: if failreports: self.write_sep("#", "LOOPONFAILING", red=True) for report in failreports: - try: - loc = report.longrepr.reprcrash - except AttributeError: - loc = str(report.longrepr)[:50] + loc = self._getcrashline(report) self.write_line(loc, red=True) self.write_sep("#", "waiting for changes") for rootdir in rootdirs: self.write_line("### Watching: %s" %(rootdir,), bold=True) + def _getcrashline(self, report): + try: + return report.longrepr.reprcrash + except AttributeError: + return str(report.longrepr)[:50] + def _reportinfoline(self, item): collect_fspath = self._getfspath(item) fspath, lineno, msg = self._getreportinfo(item) @@ -333,13 +336,18 @@ class TerminalReporter: # def summary_failures(self): - if 'failed' in self.stats and self.config.option.tbstyle != "no": + tbstyle = self.config.getvalue("tbstyle") + if 'failed' in self.stats and tbstyle != "no": self.write_sep("=", "FAILURES") for rep in self.stats['failed']: - msg = self._getfailureheadline(rep) - self.write_sep("_", msg) - self.write_platinfo(rep) - rep.toterminal(self._tw) + if tbstyle == "line": + line = self._getcrashline(rep) + self.write_line(line) + else: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg) + self.write_platinfo(rep) + rep.toterminal(self._tw) def summary_errors(self): if 'error' in self.stats and self.config.option.tbstyle != "no": --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,7 @@ Changes between 1.2.1 and 1.2.0 ===================================== +- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value - fix issue78: always call python-level teardown functions even if the according setup failed - but make sure that setup is called repeatedly and no teardown if the setup raises a Skipped (as sone by py.test.skip()). 
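Usage note for the new option (editorial example, not from the changeset): given a failing module such as the hypothetical one below, running "py.test --tb=line" is expected to print one short line per failure of the form path:lineno: ExceptionName[: message] instead of a full traceback::

    # test_crash.py -- hypothetical example
    def test_index():
        raise IndexError     # summary line roughly: test_crash.py:<lineno>: IndexError

    def test_assert():
        assert 0, "hello"    # summary line roughly: test_crash.py:<lineno>: AssertionError: hello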
From commits-noreply at bitbucket.org Wed Jan 27 13:02:27 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Wed, 27 Jan 2010 12:02:27 +0000 (UTC) Subject: [py-svn] py-trunk commit 4ddb2d37307d: install pygments for tests Message-ID: <20100127120227.626FA7EF33@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1264593722 -3600 # Node ID 4ddb2d37307df8a2bfb7df8e63039f735ada6c64 # Parent f50624ea4488e43c0d472f1de853e336217368c4 install pygments for tests --- a/testing/pip-reqs1.txt +++ b/testing/pip-reqs1.txt @@ -1,4 +1,5 @@ docutils +pygments pexpect figleaf hg+http://bitbucket.org/hpk42/execnet#egg=execnet --- a/testing/pip-reqs2.txt +++ b/testing/pip-reqs2.txt @@ -1,4 +1,5 @@ docutils +pygments pexpect figleaf execnet From commits-noreply at bitbucket.org Thu Jan 28 14:24:35 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 28 Jan 2010 13:24:35 +0000 (UTC) Subject: [py-svn] py-trunk commit b27a62c65746: again addresses issue78 : we now call teardown also if setup raised a Skipped exception. Message-ID: <20100128132435.A5E517EF62@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1264684858 -3600 # Node ID b27a62c65746ed0a2bc1625729420376d1d851be # Parent 4ddb2d37307df8a2bfb7df8e63039f735ada6c64 again addresses issue78 : we now call teardown also if setup raised a Skipped exception. I also made sure, setup_module/class will only be called once - before they'd be call again and again if they raise an error or a skip - for each test in their scope. --- a/testing/plugin/test_pytest_runner.py +++ b/testing/plugin/test_pytest_runner.py @@ -34,6 +34,16 @@ class TestSetupState: ss.teardown_exact(item) ss.teardown_exact(item) + def test_setup_fails_and_failure_is_cached(self, testdir): + item = testdir.getitem(""" + def setup_module(mod): + raise ValueError(42) + def test_func(): pass + """) + ss = runner.SetupState() + py.test.raises(ValueError, "ss.prepare(item)") + py.test.raises(ValueError, "ss.prepare(item)") + class BaseFunctionalTests: def test_passfunction(self, testdir): reports = testdir.runitem(""" --- a/testing/plugin/test_pytest_runner_xunit.py +++ b/testing/plugin/test_pytest_runner_xunit.py @@ -150,7 +150,7 @@ def test_failing_setup_calls_teardown(te "*2 error*" ]) -def test_setup_that_skips_calledagain_and_no_teardown(testdir): +def test_setup_that_skips_calledagain_and_teardown(testdir): p = testdir.makepyfile(""" import py def setup_module(mod): @@ -164,8 +164,27 @@ def test_setup_that_skips_calledagain_an """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ - "*2 skipped*", + "*ValueError*43*", + "*2 skipped*1 error*", ]) - assert "43" not in result.stdout.str() +def test_setup_fails_again_on_all_tests(testdir): + p = testdir.makepyfile(""" + import py + def setup_module(mod): + raise ValueError(42) + def test_function1(): + pass + def test_function2(): + pass + def teardown_module(mod): + raise ValueError(43) + """) + result = testdir.runpytest(p) + result.stdout.fnmatch_lines([ + "*3 error*" + ]) + assert "passed" not in result.stdout.str() + + --- a/py/_plugin/pytest_runner.py +++ b/py/_plugin/pytest_runner.py @@ -2,7 +2,7 @@ collect and run test items and create reports. 
""" -import py +import py, sys from py._test.outcome import Skipped # @@ -252,6 +252,10 @@ class SetupState(object): self.stack.append(col) try: col.setup() - except Skipped: - self.stack.pop() - raise + except Exception: + col._prepare_exc = sys.exc_info() + raise + # check if the last collection node has raised an error + for col in self.stack: + if hasattr(col, '_prepare_exc'): + py.builtin._reraise(*col._prepare_exc) --- a/CHANGELOG +++ b/CHANGELOG @@ -3,8 +3,10 @@ Changes between 1.2.1 and 1.2.0 - fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value - fix issue78: always call python-level teardown functions even if the - according setup failed - but make sure that setup is called repeatedly - and no teardown if the setup raises a Skipped (as sone by py.test.skip()). + according setup failed. This includes refinements for calling setup_module/class functions + which will now only be called once instead of the previous behaviour where they'd be called + multiple times if they raise an exception (including a Skipped exception). Any exception + will be re-corded and associated with all tests in the according module/class scope. - fix issue63: assume <40 columns to be a bogus terminal width, default to 80 - update apipkg.py to fix an issue where recursive imports might unnecessarily break importing From commits-noreply at bitbucket.org Thu Jan 28 15:36:44 2010 From: commits-noreply at bitbucket.org (commits-noreply at bitbucket.org) Date: Thu, 28 Jan 2010 14:36:44 +0000 (UTC) Subject: [py-svn] py-trunk commit 0c6cafb6f4e6: refine setup ordering some more - test and avoid a problem with funcarg setups where the Message-ID: <20100128143644.A70277EF68@bitbucket.org> # HG changeset patch -- Bitbucket.org # Project py-trunk # URL http://bitbucket.org/hpk42/py-trunk/overview/ # User holger krekel # Date 1264689387 -3600 # Node ID 0c6cafb6f4e6920baa63a0a86fd8113bfc2f07b1 # Parent b27a62c65746ed0a2bc1625729420376d1d851be refine setup ordering some more - test and avoid a problem with funcarg setups where the surrounding setup_module would fail, but the funcarg setup still be called (which might assume that setup_module has been called so would raise a confusing error) --- a/testing/plugin/test_pytest_runner_xunit.py +++ b/testing/plugin/test_pytest_runner_xunit.py @@ -186,5 +186,27 @@ def test_setup_fails_again_on_all_tests( ]) assert "passed" not in result.stdout.str() +def test_setup_funcarg_setup_not_called_if_outer_scope_fails(testdir): + p = testdir.makepyfile(""" + import py + def setup_module(mod): + raise ValueError(42) + def pytest_funcarg__hello(request): + raise ValueError(43) + def test_function1(hello): + pass + def test_function2(hello): + pass + """) + result = testdir.runpytest(p) + result.stdout.fnmatch_lines([ + "*function1*", + "*ValueError*42*", + "*function2*", + "*ValueError*42*", + "*2 error*" + ]) + assert "43" not in result.stdout.str() + --- a/py/_plugin/pytest_runner.py +++ b/py/_plugin/pytest_runner.py @@ -248,6 +248,10 @@ class SetupState(object): if self.stack == needed_collectors[:len(self.stack)]: break self._pop_and_teardown() + # check if the last collection node has raised an error + for col in self.stack: + if hasattr(col, '_prepare_exc'): + py.builtin._reraise(*col._prepare_exc) for col in needed_collectors[len(self.stack):]: self.stack.append(col) try: @@ -255,7 +259,3 @@ class SetupState(object): except Exception: col._prepare_exc = 
sys.exc_info() raise - # check if the last collection node has raised an error - for col in self.stack: - if hasattr(col, '_prepare_exc'): - py.builtin._reraise(*col._prepare_exc)
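Editorial sketch of the caching idea behind the pytest_runner.py changes above (simplified stand-in, not the actual py code): the first setup failure is remembered on the collector node and re-raised for every later test in the same scope, and already-active collectors are checked before any deeper setup (for example a funcarg factory) is attempted::

    import sys

    class SetupStackSketch:
        def __init__(self):
            self.stack = []

        def prepare(self, needed_collectors):
            # if an already-active collector failed its setup earlier,
            # re-raise that failure instead of running deeper setup code
            for col in self.stack:
                if hasattr(col, "_prepare_exc"):
                    raise col._prepare_exc[1]
            # set up the missing collectors, caching the first failure on the node
            for col in needed_collectors[len(self.stack):]:
                self.stack.append(col)
                try:
                    col.setup()
                except Exception:
                    col._prepare_exc = sys.exc_info()   # remembered for later tests
                    raise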