From commits-noreply at bitbucket.org  Sun Nov  6 16:52:56 2011
From: commits-noreply at bitbucket.org (Bitbucket)
Date: Sun, 06 Nov 2011 15:52:56 -0000
Subject: [py-svn] commit/pytest: hpk42: fix FD leakage during pytest's own test run and add "--lsof" option to tox default test runs.
Message-ID: <20111106155256.15037.55565@bitbucket05.managed.contegix.com>

1 new commit in pytest:

https://bitbucket.org/hpk42/pytest/changeset/67adc7327ef3/
changeset:   67adc7327ef3
user:        hpk42
date:        2011-11-06 16:40:17
summary:     fix FD leakage during pytest's own test run and add "--lsof" option to tox default test runs.

The leakage came down to a problematic bit of the stdlib logging module: it takes ownership of stdout/stderr, making it hard for pytest to implement clean capturing. The current workaround is to add some extra code to the setup machinery of pytest's own tests which actually closes sub-FDs.

affected #:  6 files

diff -r bc2f63120b354025a3de44a73ad1d8bd003a04c3 -r 67adc7327ef35aa3bafd4d423b99eda72e39faa3 CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,6 +1,7 @@
 Changes between 2.1.3 and [next version]
 ----------------------------------------

+- fix pytest's own test suite to not leak FDs
 - fix issue83: link to generated funcarg list
 - fix issue74: pyarg module names are now checked against imp.find_module false positives

diff -r bc2f63120b354025a3de44a73ad1d8bd003a04c3 -r 67adc7327ef35aa3bafd4d423b99eda72e39faa3 _pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.1.3'
+__version__ = '2.1.4.dev1'

diff -r bc2f63120b354025a3de44a73ad1d8bd003a04c3 -r 67adc7327ef35aa3bafd4d423b99eda72e39faa3 _pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -402,6 +402,15 @@
         config.pluginmanager.do_configure(config)
         self.request.addfinalizer(lambda: config.pluginmanager.do_unconfigure(config))
+        # XXX we need to additionally reset FDs to prevent pen FDs
+        # during our test suite.
see also capture.py's unconfigure XXX + # comment about logging + def finalize_capman(): + capman = config.pluginmanager.getplugin('capturemanager') + while capman._method2capture: + name, cap = capman._method2capture.popitem() + cap.reset() + self.request.addfinalizer(finalize_capman) return config def getitem(self, source, funcname="test_func"): diff -r bc2f63120b354025a3de44a73ad1d8bd003a04c3 -r 67adc7327ef35aa3bafd4d423b99eda72e39faa3 setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.1.3', + version='2.1.4.dev1', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r bc2f63120b354025a3de44a73ad1d8bd003a04c3 -r 67adc7327ef35aa3bafd4d423b99eda72e39faa3 testing/conftest.py --- a/testing/conftest.py +++ b/testing/conftest.py @@ -35,7 +35,7 @@ __multicall__.execute() out2 = py.process.cmdexec("lsof -p %d" % pid) len2 = getopenfiles(out2) - assert len2 < config._numfiles + 7, out2 + assert len2 < config._numfiles + 15, out2 def pytest_runtest_setup(item): diff -r bc2f63120b354025a3de44a73ad1d8bd003a04c3 -r 67adc7327ef35aa3bafd4d423b99eda72e39faa3 tox.ini --- a/tox.ini +++ b/tox.ini @@ -8,7 +8,7 @@ [testenv] changedir=testing -commands= py.test -rfsxX --junitxml={envlogdir}/junit-{envname}.xml [] +commands= py.test --lsof -rfsxX --junitxml={envlogdir}/junit-{envname}.xml [] deps= :pypi:pexpect :pypi:nose Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Mon Nov 7 19:10:49 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Mon, 07 Nov 2011 18:10:49 -0000 Subject: [py-svn] commit/pytest: 2 new changesets Message-ID: <20111107181049.12841.12342@bitbucket03.managed.contegix.com> 2 new commits in pytest: https://bitbucket.org/hpk42/pytest/changeset/f025865402cc/ changeset: f025865402cc user: hpk42 date: 2011-11-06 20:34:02 summary: don't remove symlinks from temporary directory path - should help with some standard OSX setups affected #: 4 files diff -r 67adc7327ef35aa3bafd4d423b99eda72e39faa3 -r f025865402ccf7cc929c32275582c31368816393 _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.1.4.dev1' +__version__ = '2.1.4.dev2' diff -r 67adc7327ef35aa3bafd4d423b99eda72e39faa3 -r f025865402ccf7cc929c32275582c31368816393 _pytest/tmpdir.py --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -46,7 +46,7 @@ def finish(self): self.trace("finish") - + def pytest_configure(config): mp = monkeypatch() t = TempdirHandler(config) @@ -64,5 +64,5 @@ name = request._pyfuncitem.name name = py.std.re.sub("[\W]", "_", name) x = request.config._tmpdirhandler.mktemp(name, numbered=True) - return x.realpath() + return x diff -r 67adc7327ef35aa3bafd4d423b99eda72e39faa3 -r f025865402ccf7cc929c32275582c31368816393 setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.1.4.dev1', + version='2.1.4.dev2', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r 67adc7327ef35aa3bafd4d423b99eda72e39faa3 -r f025865402ccf7cc929c32275582c31368816393 testing/test_tmpdir.py --- a/testing/test_tmpdir.py +++ 
b/testing/test_tmpdir.py @@ -75,3 +75,16 @@ result = testdir.runpytest(p, '--basetemp=%s' % mytemp) assert result.ret == 0 assert mytemp.join('hello').check() + + at pytest.mark.skipif("not hasattr(os, 'symlink')") +def test_tmpdir_keeps_symlinks(testdir): + realtemp = testdir.tmpdir.mkdir("myrealtemp") + linktemp = testdir.tmpdir.join("symlinktemp") + linktemp.mksymlinkto(realtemp) + p = testdir.makepyfile(""" + def test_1(tmpdir): + import os + assert os.path.realpath(str(tmpdir)) != str(tmpdir) + """) + result = testdir.runpytest("-s", p, '--basetemp=%s/bt' % linktemp) + assert not result.ret https://bitbucket.org/hpk42/pytest/changeset/40ed9c90eed1/ changeset: 40ed9c90eed1 user: hpk42 date: 2011-11-07 19:08:41 summary: refine lsof/FD leakage testing and rework test setup and some of pytest own tests. Note that the actual diff to non-test code is small. Also remove some redundant tests (introduced by a copy-paste-error apparently in test_mark.py). affected #: 19 files diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -1,7 +1,7 @@ Changes between 2.1.3 and [next version] ---------------------------------------- -- fix pytest's own test suite to not leak FDs +- fix and cleanup pytest's own test suite to not leak FDs - fix issue83: link to generated funcarg list - fix issue74: pyarg module names are now checked against imp.find_module false positives diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.1.4.dev2' +__version__ = '2.1.4.dev3' diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 _pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -11,20 +11,23 @@ group._addoption('-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + at pytest.mark.tryfirst +def pytest_cmdline_parse(pluginmanager, args): + # we want to perform capturing already for plugin/conftest loading + if '-s' in args or "--capture=no" in args: + method = "no" + elif hasattr(os, 'dup') and '--capture=sys' not in args: + method = "fd" + else: + method = "sys" + capman = CaptureManager(method) + pluginmanager.register(capman, "capturemanager") + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) -def pytest_unconfigure(config): - # registered in config.py during early conftest.py loading - capman = config.pluginmanager.getplugin('capturemanager') - while capman._method2capture: - name, cap = capman._method2capture.popitem() - # XXX logging module may wants to close it itself on process exit - # otherwise we could do finalization here and call "reset()". 
- cap.suspend() - class NoCapture: def startall(self): pass @@ -36,8 +39,9 @@ return "", "" class CaptureManager: - def __init__(self): + def __init__(self, defaultmethod=None): self._method2capture = {} + self._defaultmethod = defaultmethod def _maketempfile(self): f = py.std.tempfile.TemporaryFile() @@ -62,14 +66,6 @@ else: raise ValueError("unknown capturing method: %r" % method) - def _getmethod_preoptionparse(self, args): - if '-s' in args or "--capture=no" in args: - return "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: - return "fd" - else: - return "sys" - def _getmethod(self, config, fspath): if config.option.capture: method = config.option.capture @@ -82,16 +78,22 @@ method = "sys" return method + def reset_capturings(self): + for name, cap in self._method2capture.items(): + cap.reset() + def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) - def resumecapture(self, method): + def resumecapture(self, method=None): if hasattr(self, '_capturing'): raise ValueError("cannot resume, already capturing with %r" % (self._capturing,)) + if method is None: + method = self._defaultmethod cap = self._method2capture.get(method) self._capturing = method if cap is None: diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 _pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -11,8 +11,12 @@ return config def pytest_unconfigure(config): - for func in config._cleanup: - func() + while 1: + try: + fin = config._cleanup.pop() + except IndexError: + break + fin() class Parser: """ Parser for command line arguments. """ @@ -254,11 +258,14 @@ self.hook = self.pluginmanager.hook self._inicache = {} self._cleanup = [] - + @classmethod def fromdictargs(cls, option_dict, args): """ constructor useable for subprocesses. """ config = cls() + # XXX slightly crude way to initialize capturing + import _pytest.capture + _pytest.capture.pytest_cmdline_parse(config.pluginmanager, args) config._preparse(args, addopts=False) config.option.__dict__.update(option_dict) for x in config.option.plugins: @@ -283,11 +290,10 @@ def _setinitialconftest(self, args): # capture output during conftest init (#issue93) - from _pytest.capture import CaptureManager - capman = CaptureManager() - self.pluginmanager.register(capman, 'capturemanager') - # will be unregistered in capture.py's unconfigure() - capman.resumecapture(capman._getmethod_preoptionparse(args)) + # XXX introduce load_conftest hook to avoid needing to know + # about capturing plugin here + capman = self.pluginmanager.getplugin("capturemanager") + capman.resumecapture() try: try: self._conftest.setinitial(args) diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 _pytest/core.py --- a/_pytest/core.py +++ b/_pytest/core.py @@ -431,10 +431,7 @@ def _preloadplugins(): _preinit.append(PluginManager(load=True)) -def main(args=None, plugins=None): - """ returned exit code integer, after an in-process testing run - with the given command line arguments, preloading an optional list - of passed in plugin objects. 
""" +def _prepareconfig(args=None, plugins=None): if args is None: args = sys.argv[1:] elif isinstance(args, py.path.local): @@ -448,13 +445,19 @@ else: # subsequent calls to main will create a fresh instance _pluginmanager = PluginManager(load=True) hook = _pluginmanager.hook + if plugins: + for plugin in plugins: + _pluginmanager.register(plugin) + return hook.pytest_cmdline_parse( + pluginmanager=_pluginmanager, args=args) + +def main(args=None, plugins=None): + """ returned exit code integer, after an in-process testing run + with the given command line arguments, preloading an optional list + of passed in plugin objects. """ try: - if plugins: - for plugin in plugins: - _pluginmanager.register(plugin) - config = hook.pytest_cmdline_parse( - pluginmanager=_pluginmanager, args=args) - exitstatus = hook.pytest_cmdline_main(config=config) + config = _prepareconfig(args, plugins) + exitstatus = config.hook.pytest_cmdline_main(config=config) except UsageError: e = sys.exc_info()[1] sys.stderr.write("ERROR: %s\n" %(e.args[0],)) diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 _pytest/helpconfig.py --- a/_pytest/helpconfig.py +++ b/_pytest/helpconfig.py @@ -56,6 +56,7 @@ elif config.option.help: config.pluginmanager.do_configure(config) showhelp(config) + config.pluginmanager.do_unconfigure(config) return 0 def showhelp(config): @@ -113,7 +114,7 @@ verinfo = getpluginversioninfo(config) if verinfo: lines.extend(verinfo) - + if config.option.traceconfig: lines.append("active plugins:") plugins = [] diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 _pytest/main.py --- a/_pytest/main.py +++ b/_pytest/main.py @@ -50,7 +50,7 @@ def pytest_namespace(): collect = dict(Item=Item, Collector=Collector, File=File, Session=Session) return dict(collect=collect) - + def pytest_configure(config): py.test.config = config # compatibiltiy if config.option.exitfirst: @@ -134,7 +134,7 @@ return getattr(pytest, name) return property(fget, None, None, "deprecated attribute %r, use pytest.%s" % (name,name)) - + class Node(object): """ base class for all Nodes in the collection tree. Collector subclasses have children, Items are terminal nodes.""" @@ -145,13 +145,13 @@ #: the parent collector node. 
self.parent = parent - + #: the test config object self.config = config or parent.config #: the collection this node is part of self.session = session or parent.session - + #: filesystem path where this node was collected from self.fspath = getattr(parent, 'fspath', None) self.ihook = self.session.gethookproxy(self.fspath) @@ -488,7 +488,7 @@ else: if fd is not None: fd.close() - + if type_[2] != imp.PKG_DIRECTORY: path = [os.path.dirname(mod)] else: @@ -511,7 +511,7 @@ raise pytest.UsageError(msg + arg) parts[0] = path return parts - + def matchnodes(self, matching, names): self.trace("matchnodes", matching, names) self.trace.root.indent += 1 diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 _pytest/pytester.py --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -314,16 +314,6 @@ result.extend(session.genitems(colitem)) return result - def inline_genitems(self, *args): - #config = self.parseconfig(*args) - config = self.parseconfigure(*args) - rec = self.getreportrecorder(config) - session = Session(config) - config.hook.pytest_sessionstart(session=session) - session.perform_collect() - config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) - return session.items, rec - def runitem(self, source): # used from runner functional tests item = self.getitem(source) @@ -347,70 +337,52 @@ assert len(reports) == 1, reports return reports[0] + def inline_genitems(self, *args): + return self.inprocess_run(list(args) + ['--collectonly']) + def inline_run(self, *args): - args = ("-s", ) + args # otherwise FD leakage - config = self.parseconfig(*args) - reprec = self.getreportrecorder(config) - #config.pluginmanager.do_configure(config) - config.hook.pytest_cmdline_main(config=config) - #config.pluginmanager.do_unconfigure(config) - return reprec + items, rec = self.inprocess_run(args) + return rec - def config_preparse(self): - config = self.Config() - for plugin in self.plugins: - if isinstance(plugin, str): - config.pluginmanager.import_plugin(plugin) - else: - if isinstance(plugin, dict): - plugin = PseudoPlugin(plugin) - if not config.pluginmanager.isregistered(plugin): - config.pluginmanager.register(plugin) - return config + def inprocess_run(self, args, plugins=None): + rec = [] + items = [] + class Collect: + def pytest_configure(x, config): + rec.append(self.getreportrecorder(config)) + def pytest_itemcollected(self, item): + items.append(item) + if not plugins: + plugins = [] + plugins.append(Collect()) + self.pytestmain(list(args), plugins=[Collect()]) + assert len(rec) == 1 + return items, rec[0] def parseconfig(self, *args): - if not args: - args = (self.tmpdir,) - config = self.config_preparse() - args = list(args) + args = map(str, args) for x in args: if str(x).startswith('--basetemp'): break else: args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp')) - config.parse(args) + import _pytest.core + config = _pytest.core._prepareconfig(args, self.plugins) + # the in-process pytest invocation needs to avoid leaking FDs + # so we register a "reset_capturings" callmon the capturing manager + # and make sure it gets called + config._cleanup.append( + config.pluginmanager.getplugin("capturemanager").reset_capturings) + import _pytest.config + self.request.addfinalizer( + lambda: _pytest.config.pytest_unconfigure(config)) return config - def reparseconfig(self, args=None): - """ this is used from tests that want to re-invoke parse(). 
""" - if not args: - args = [self.tmpdir] - oldconfig = getattr(py.test, 'config', None) - try: - c = py.test.config = self.Config() - c.basetemp = py.path.local.make_numbered_dir(prefix="reparse", - keep=0, rootdir=self.tmpdir, lock_timeout=None) - c.parse(args) - c.pluginmanager.do_configure(c) - self.request.addfinalizer(lambda: c.pluginmanager.do_unconfigure(c)) - return c - finally: - py.test.config = oldconfig - def parseconfigure(self, *args): config = self.parseconfig(*args) config.pluginmanager.do_configure(config) self.request.addfinalizer(lambda: - config.pluginmanager.do_unconfigure(config)) - # XXX we need to additionally reset FDs to prevent pen FDs - # during our test suite. see also capture.py's unconfigure XXX - # comment about logging - def finalize_capman(): - capman = config.pluginmanager.getplugin('capturemanager') - while capman._method2capture: - name, cap = capman._method2capture.popitem() - cap.reset() - self.request.addfinalizer(finalize_capman) + config.pluginmanager.do_unconfigure(config)) return config def getitem(self, source, funcname="test_func"): @@ -430,7 +402,6 @@ self.makepyfile(__init__ = "#") self.config = config = self.parseconfigure(path, *configargs) node = self.getnode(config, path) - #config.pluginmanager.do_unconfigure(config) return node def collect_by_name(self, modcol, name): @@ -447,9 +418,16 @@ return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) def pytestmain(self, *args, **kwargs): - ret = pytest.main(*args, **kwargs) - if ret == 2: - raise KeyboardInterrupt() + class ResetCapturing: + @pytest.mark.trylast + def pytest_unconfigure(self, config): + capman = config.pluginmanager.getplugin("capturemanager") + capman.reset_capturings() + plugins = kwargs.setdefault("plugins", []) + rc = ResetCapturing() + plugins.append(rc) + return pytest.main(*args, **kwargs) + def run(self, *cmdargs): return self._run(*cmdargs) @@ -550,10 +528,6 @@ return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( py.io.saferepr(out),) -class PseudoPlugin: - def __init__(self, vars): - self.__dict__.update(vars) - class ReportRecorder(object): def __init__(self, hook): self.hook = hook diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 _pytest/terminal.py --- a/_pytest/terminal.py +++ b/_pytest/terminal.py @@ -43,7 +43,8 @@ pass else: stdout = os.fdopen(newfd, stdout.mode, 1) - config._toclose = stdout + config._cleanup.append(lambda: stdout.close()) + reporter = TerminalReporter(config, stdout) config.pluginmanager.register(reporter, 'terminalreporter') if config.option.debug or config.option.traceconfig: @@ -52,11 +53,6 @@ reporter.write_line("[traceconfig] " + msg) config.trace.root.setprocessor("pytest:config", mywriter) -def pytest_unconfigure(config): - if hasattr(config, '_toclose'): - #print "closing", config._toclose, config._toclose.fileno() - config._toclose.close() - def getreportopt(config): reportopts = "" optvalue = config.option.report diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.1.4.dev2', + version='2.1.4.dev3', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 testing/acceptance_test.py --- 
a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -13,6 +13,12 @@ '*ERROR: hello' ]) + def test_root_conftest_syntax_error(self, testdir): + p = testdir.makepyfile(conftest="raise SyntaxError\n") + result = testdir.runpytest() + result.stderr.fnmatch_lines(["*raise SyntaxError*"]) + assert result.ret != 0 + def test_early_hook_error_issue38_1(self, testdir): testdir.makeconftest(""" def pytest_sessionstart(): @@ -354,24 +360,24 @@ def test_equivalence_pytest_pytest(self): assert pytest.main == py.test.cmdline.main - def test_invoke_with_string(self, capsys): - retcode = pytest.main("-h") + def test_invoke_with_string(self, testdir, capsys): + retcode = testdir.pytestmain("-h") assert not retcode out, err = capsys.readouterr() assert "--help" in out - pytest.raises(ValueError, lambda: pytest.main(retcode)) + pytest.raises(ValueError, lambda: pytest.main(0)) def test_invoke_with_path(self, testdir, capsys): retcode = testdir.pytestmain(testdir.tmpdir) assert not retcode out, err = capsys.readouterr() - def test_invoke_plugin_api(self, capsys): + def test_invoke_plugin_api(self, testdir, capsys): class MyPlugin: def pytest_addoption(self, parser): parser.addoption("--myopt") - pytest.main(["-h"], plugins=[MyPlugin()]) + testdir.pytestmain(["-h"], plugins=[MyPlugin()]) out, err = capsys.readouterr() assert "--myopt" in out diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 testing/conftest.py --- a/testing/conftest.py +++ b/testing/conftest.py @@ -18,7 +18,7 @@ except py.process.cmdexec.Error: pass else: - config._numfiles = getopenfiles(out) + config._numfiles = len(getopenfiles(out)) #def pytest_report_header(): # return "pid: %s" % os.getpid() @@ -26,23 +26,31 @@ def getopenfiles(out): def isopen(line): return ("REG" in line or "CHR" in line) and ( - "deleted" not in line and 'mem' not in line) - return len([x for x in out.split("\n") if isopen(x)]) + "deleted" not in line and 'mem' not in line and "txt" not in line) + return [x for x in out.split("\n") if isopen(x)] -def pytest_unconfigure(config, __multicall__): - if not hasattr(config, '_numfiles'): - return - __multicall__.execute() +def check_open_files(config): out2 = py.process.cmdexec("lsof -p %d" % pid) - len2 = getopenfiles(out2) - assert len2 < config._numfiles + 15, out2 - + lines2 = getopenfiles(out2) + if len(lines2) > config._numfiles + 1: + error = [] + error.append("***** %s FD leackage detected" % + (len(lines2)-config._numfiles)) + error.extend(lines2) + error.append(error[0]) + # update numfile so that the overall test run continuess + config._numfiles = len(lines2) + raise AssertionError("\n".join(error)) def pytest_runtest_setup(item): item._oldir = py.path.local() -def pytest_runtest_teardown(item): +def pytest_runtest_teardown(item, __multicall__): item._oldir.chdir() + if hasattr(item.config, '_numfiles'): + x = __multicall__.execute() + check_open_files(item.config) + return x def pytest_generate_tests(metafunc): multi = getattr(metafunc.function, 'multi', None) diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 testing/test_capture.py --- a/testing/test_capture.py +++ b/testing/test_capture.py @@ -16,7 +16,6 @@ def test_configure_per_fspath(self, testdir): config = testdir.parseconfig(testdir.tmpdir) - assert config.getvalue("capture") is None capman = CaptureManager() hasfd = hasattr(os, 'dup') if hasfd: @@ -53,6 +52,7 @@ capman.resumecapture(method) out, err = capman.suspendcapture() assert not out and not err + 
capman.reset_capturings() finally: capouter.reset() @@ -60,20 +60,23 @@ def test_juggle_capturings(self, testdir): capouter = py.io.StdCaptureFD() try: - config = testdir.parseconfig(testdir.tmpdir) + #config = testdir.parseconfig(testdir.tmpdir) capman = CaptureManager() - capman.resumecapture("fd") - pytest.raises(ValueError, 'capman.resumecapture("fd")') - pytest.raises(ValueError, 'capman.resumecapture("sys")') - os.write(1, "hello\n".encode('ascii')) - out, err = capman.suspendcapture() - assert out == "hello\n" - capman.resumecapture("sys") - os.write(1, "hello\n".encode('ascii')) - py.builtin.print_("world", file=sys.stderr) - out, err = capman.suspendcapture() - assert not out - assert err == "world\n" + try: + capman.resumecapture("fd") + pytest.raises(ValueError, 'capman.resumecapture("fd")') + pytest.raises(ValueError, 'capman.resumecapture("sys")') + os.write(1, "hello\n".encode('ascii')) + out, err = capman.suspendcapture() + assert out == "hello\n" + capman.resumecapture("sys") + os.write(1, "hello\n".encode('ascii')) + py.builtin.print_("world", file=sys.stderr) + out, err = capman.suspendcapture() + assert not out + assert err == "world\n" + finally: + capman.reset_capturings() finally: capouter.reset() diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 testing/test_collection.py --- a/testing/test_collection.py +++ b/testing/test_collection.py @@ -313,7 +313,8 @@ def test_collect_topdir(self, testdir): p = testdir.makepyfile("def test_func(): pass") id = "::".join([p.basename, "test_func"]) - config = testdir.parseconfigure(id) + # XXX migrate to inline_genitems? (see below) + config = testdir.parseconfig(id) topdir = testdir.tmpdir rcol = Session(config) assert topdir == rcol.fspath @@ -328,15 +329,9 @@ def test_collect_protocol_single_function(self, testdir): p = testdir.makepyfile("def test_func(): pass") id = "::".join([p.basename, "test_func"]) - config = testdir.parseconfigure(id) topdir = testdir.tmpdir - rcol = Session(config) - assert topdir == rcol.fspath - hookrec = testdir.getreportrecorder(config) - rcol.perform_collect() - items = rcol.items - assert len(items) == 1 - item = items[0] + items, hookrec = testdir.inline_genitems(id) + item, = items assert item.name == "test_func" newid = item.nodeid assert newid == id @@ -363,10 +358,7 @@ p.basename + "::TestClass::()", normid, ]: - config = testdir.parseconfigure(id) - rcol = Session(config=config) - rcol.perform_collect() - items = rcol.items + items, hookrec = testdir.inline_genitems(id) assert len(items) == 1 assert items[0].name == "test_method" newid = items[0].nodeid @@ -388,11 +380,7 @@ """ % p.basename) id = p.basename - config = testdir.parseconfigure(id) - rcol = Session(config) - hookrec = testdir.getreportrecorder(config) - rcol.perform_collect() - items = rcol.items + items, hookrec = testdir.inline_genitems(id) py.std.pprint.pprint(hookrec.hookrecorder.calls) assert len(items) == 2 hookrec.hookrecorder.contains([ @@ -413,11 +401,8 @@ aaa = testdir.mkpydir("aaa") test_aaa = aaa.join("test_aaa.py") p.move(test_aaa) - config = testdir.parseconfigure() - rcol = Session(config) - hookrec = testdir.getreportrecorder(config) - rcol.perform_collect() - items = rcol.items + + items, hookrec = testdir.inline_genitems() assert len(items) == 1 py.std.pprint.pprint(hookrec.hookrecorder.calls) hookrec.hookrecorder.contains([ @@ -437,11 +422,8 @@ p.move(test_bbb) id = "." 
- config = testdir.parseconfigure(id) - rcol = Session(config) - hookrec = testdir.getreportrecorder(config) - rcol.perform_collect() - items = rcol.items + + items, hookrec = testdir.inline_genitems(id) assert len(items) == 2 py.std.pprint.pprint(hookrec.hookrecorder.calls) hookrec.hookrecorder.contains([ @@ -455,19 +437,13 @@ def test_serialization_byid(self, testdir): p = testdir.makepyfile("def test_func(): pass") - config = testdir.parseconfigure() - rcol = Session(config) - rcol.perform_collect() - items = rcol.items + items, hookrec = testdir.inline_genitems() assert len(items) == 1 item, = items - rcol.config.pluginmanager.unregister(name="session") - newcol = Session(config) - item2, = newcol.perform_collect([item.nodeid], genitems=False) + items2, hookrec = testdir.inline_genitems(item.nodeid) + item2, = items2 assert item2.name == item.name assert item2.fspath == item.fspath - item2b, = newcol.perform_collect([item.nodeid], genitems=False) - assert item2b == item2 def test_find_byid_without_instance_parents(self, testdir): p = testdir.makepyfile(""" @@ -476,10 +452,7 @@ pass """) arg = p.basename + ("::TestClass::test_method") - config = testdir.parseconfigure(arg) - rcol = Session(config) - rcol.perform_collect() - items = rcol.items + items, hookrec = testdir.inline_genitems(arg) assert len(items) == 1 item, = items assert item.nodeid.endswith("TestClass::()::test_method") @@ -487,7 +460,7 @@ class Test_getinitialnodes: def test_global_file(self, testdir, tmpdir): x = tmpdir.ensure("x.py") - config = testdir.reparseconfig([x]) + config = testdir.parseconfigure(x) col = testdir.getnode(config, x) assert isinstance(col, pytest.Module) assert col.name == 'x.py' @@ -502,7 +475,7 @@ subdir = tmpdir.join("subdir") x = subdir.ensure("x.py") subdir.ensure("__init__.py") - config = testdir.reparseconfig([x]) + config = testdir.parseconfigure(x) col = testdir.getnode(config, x) assert isinstance(col, pytest.Module) assert col.name == 'subdir/x.py' @@ -528,12 +501,6 @@ assert hash(i) != hash(j) assert i != j - def test_root_conftest_syntax_error(self, testdir): - # do we want to unify behaviour with - # test_subdir_conftest_error? 
- p = testdir.makepyfile(conftest="raise SyntaxError\n") - pytest.raises(SyntaxError, testdir.inline_genitems, p.dirpath()) - def test_example_items1(self, testdir): p = testdir.makepyfile(''' def testone(): @@ -597,6 +564,6 @@ res.stdout.fnmatch_lines([ "*1 passed*", ]) - + diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 testing/test_config.py --- a/testing/test_config.py +++ b/testing/test_config.py @@ -1,9 +1,9 @@ import py, pytest -from _pytest.config import getcfg, Config +from _pytest.config import getcfg class TestParseIni: - def test_getcfg_and_config(self, tmpdir): + def test_getcfg_and_config(self, testdir, tmpdir): sub = tmpdir.mkdir("sub") sub.chdir() tmpdir.join("setup.cfg").write(py.code.Source(""" @@ -12,25 +12,23 @@ """)) cfg = getcfg([sub], ["setup.cfg"]) assert cfg['name'] == "value" - config = Config() - config._preparse([sub]) + config = testdir.parseconfigure(sub) assert config.inicfg['name'] == 'value' def test_getcfg_empty_path(self, tmpdir): cfg = getcfg([''], ['setup.cfg']) #happens on py.test "" - def test_append_parse_args(self, tmpdir): + def test_append_parse_args(self, testdir, tmpdir): tmpdir.join("setup.cfg").write(py.code.Source(""" [pytest] addopts = --verbose """)) - config = Config() - config.parse([tmpdir]) + config = testdir.parseconfig(tmpdir) assert config.option.verbose - config = Config() - args = [tmpdir,] - config._preparse(args, addopts=False) - assert len(args) == 1 + #config = testdir.Config() + #args = [tmpdir,] + #config._preparse(args, addopts=False) + #assert len(args) == 1 def test_tox_ini_wrong_version(self, testdir): p = testdir.makefile('.ini', tox=""" @@ -49,8 +47,7 @@ [pytest] minversion = 1.0 """)) - config = Config() - config.parse([testdir.tmpdir]) + config = testdir.parseconfig() assert config.getini("minversion") == "1.0" def test_toxini_before_lower_pytestini(self, testdir): @@ -63,8 +60,7 @@ [pytest] minversion = 1.5 """)) - config = Config() - config.parse([sub]) + config = testdir.parseconfigure(sub) assert config.getini("minversion") == "2.0" @pytest.mark.xfail(reason="probably not needed") @@ -77,10 +73,10 @@ """) result = testdir.runpytest("--confcutdir=.") assert result.ret == 0 - + class TestConfigCmdlineParsing: def test_parsing_again_fails(self, testdir): - config = testdir.reparseconfig([testdir.tmpdir]) + config = testdir.parseconfig() pytest.raises(AssertionError, "config.parse([])") @@ -101,7 +97,7 @@ assert config.getvalue("x") == 1 assert config.getvalue("x", o.join('sub')) == 2 pytest.raises(KeyError, "config.getvalue('y')") - config = testdir.reparseconfig([str(o.join('sub'))]) + config = testdir.parseconfigure(str(o.join('sub'))) assert config.getvalue("x") == 2 assert config.getvalue("y") == 3 assert config.getvalue("x", o) == 1 @@ -127,18 +123,18 @@ def test_config_overwrite(self, testdir): o = testdir.tmpdir o.ensure("conftest.py").write("x=1") - config = testdir.reparseconfig([str(o)]) + config = testdir.parseconfig(str(o)) assert config.getvalue('x') == 1 config.option.x = 2 assert config.getvalue('x') == 2 - config = testdir.reparseconfig([str(o)]) + config = testdir.parseconfig([str(o)]) assert config.getvalue('x') == 1 def test_getconftest_pathlist(self, testdir, tmpdir): somepath = tmpdir.join("x", "y", "z") p = tmpdir.join("conftest.py") p.write("pathlist = ['.', %r]" % str(somepath)) - config = testdir.reparseconfig([p]) + config = testdir.parseconfigure(p) assert config._getconftest_pathlist('notexist') is None pl = 
config._getconftest_pathlist('pathlist') print(pl) diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 testing/test_core.py --- a/testing/test_core.py +++ b/testing/test_core.py @@ -332,17 +332,6 @@ "*did not find*sys*" ]) - def test_do_option_conftestplugin(self, testdir): - p = testdir.makepyfile(""" - def pytest_addoption(parser): - parser.addoption('--test123', action="store_true") - """) - config = testdir.Config() - config._conftest.importconftest(p) - print(config.pluginmanager.getplugins()) - config.parse([]) - assert not config.option.test123 - def test_namespace_early_from_import(self, testdir): p = testdir.makepyfile(""" from pytest import Item @@ -370,9 +359,7 @@ ]) def test_do_option_postinitialize(self, testdir): - config = testdir.Config() - config.parse([]) - config.pluginmanager.do_configure(config=config) + config = testdir.parseconfigure() assert not hasattr(config.option, 'test123') p = testdir.makepyfile(""" def pytest_addoption(parser): @@ -640,7 +627,7 @@ log2("seen") tags, args = l2[0] assert args == ("seen",) - + def test_setmyprocessor(self): from _pytest.core import TagTracer diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 testing/test_mark.py --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -189,58 +189,6 @@ ]) -class Test_genitems: - def test_check_collect_hashes(self, testdir): - p = testdir.makepyfile(""" - def test_1(): - pass - - def test_2(): - pass - """) - p.copy(p.dirpath(p.purebasename + "2" + ".py")) - items, reprec = testdir.inline_genitems(p.dirpath()) - assert len(items) == 4 - for numi, i in enumerate(items): - for numj, j in enumerate(items): - if numj != numi: - assert hash(i) != hash(j) - assert i != j - - def test_root_conftest_syntax_error(self, testdir): - # do we want to unify behaviour with - # test_subdir_conftest_error? 
- p = testdir.makepyfile(conftest="raise SyntaxError\n") - pytest.raises(SyntaxError, testdir.inline_genitems, p.dirpath()) - - def test_example_items1(self, testdir): - p = testdir.makepyfile(''' - def testone(): - pass - - class TestX: - def testmethod_one(self): - pass - - class TestY(TestX): - pass - ''') - items, reprec = testdir.inline_genitems(p) - assert len(items) == 3 - assert items[0].name == 'testone' - assert items[1].name == 'testmethod_one' - assert items[2].name == 'testmethod_one' - - # let's also test getmodpath here - assert items[0].getmodpath() == "testone" - assert items[1].getmodpath() == "TestX.testmethod_one" - assert items[2].getmodpath() == "TestY.testmethod_one" - - s = items[0].getmodpath(stopatmodule=False) - assert s.endswith("test_example_items1.testone") - print(s) - - class TestKeywordSelection: def test_select_simple(self, testdir): file_test = testdir.makepyfile(""" diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 testing/test_python.py --- a/testing/test_python.py +++ b/testing/test_python.py @@ -257,7 +257,7 @@ assert hasattr(modcol.obj, 'test_func') def test_function_equality(self, testdir, tmpdir): - config = testdir.reparseconfig() + config = testdir.parseconfigure() session = testdir.Session(config) f1 = pytest.Function(name="name", config=config, args=(1,), callobj=isinstance, session=session) @@ -279,7 +279,7 @@ assert not f1 != f1_b def test_function_equality_with_callspec(self, testdir, tmpdir): - config = testdir.reparseconfig() + config = testdir.parseconfigure() class callspec1: param = 1 funcargs = {} @@ -783,7 +783,7 @@ req2 = funcargs.FuncargRequest(item2) ret2 = req2.cached_setup(setup, scope="class") assert ret2 == "hello" - + req3 = funcargs.FuncargRequest(item3) ret3a = req3.cached_setup(setup, scope="class") ret3b = req3.cached_setup(setup, scope="class") @@ -1320,7 +1320,7 @@ "*CheckMyApp*", "*check_meth*", ]) - + result = testdir.runpytest() assert result.ret == 0 result.stdout.fnmatch_lines([ @@ -1354,7 +1354,7 @@ Function = MyFunction class MyClass(pytest.Class): Instance = MyInstance - + def pytest_pycollect_makeitem(collector, name, obj): if name.startswith("MyTestClass"): return MyClass(name, parent=collector) diff -r f025865402ccf7cc929c32275582c31368816393 -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 testing/test_tmpdir.py --- a/testing/test_tmpdir.py +++ b/testing/test_tmpdir.py @@ -54,17 +54,6 @@ assert b2.check() assert not h.check() - def test_reparse(self, testdir): - config2 = testdir.reparseconfig([]) - config3 = testdir.reparseconfig([]) - assert config2.basetemp != config3.basetemp - assert not config2.basetemp.relto(config3.basetemp) - assert not config3.basetemp.relto(config2.basetemp) - - def test_reparse_filename_too_long(self, testdir): - config = testdir.reparseconfig(["--basetemp=%s" % ("123"*300)]) - - def test_basetemp(testdir): mytemp = testdir.tmpdir.mkdir("mytemp") p = testdir.makepyfile(""" Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
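The FD-leak detection reworked in testing/conftest.py above boils down to snapshotting the interesting "lsof -p <pid>" lines before the run and comparing the count after each test. A rough standalone sketch of the same idea, assuming a Unix host with lsof on the PATH; names such as open_files are illustrative only and not pytest API::

    import os
    import subprocess

    def open_files(pid=None):
        # keep only regular-file / character-device entries, mirroring the
        # filter used in testing/conftest.py (ignore deleted/mem/txt mappings)
        pid = pid if pid is not None else os.getpid()
        out = subprocess.check_output(["lsof", "-p", str(pid)])
        out = out.decode("utf-8", "replace")
        def isopen(line):
            return ("REG" in line or "CHR" in line) and (
                "deleted" not in line and "mem" not in line and "txt" not in line)
        return [x for x in out.split("\n") if isopen(x)]

    baseline = open_files()
    # ... exercise the code under test here ...
    leaked = len(open_files()) - len(baseline)
    assert leaked <= 1, "FD leakage detected: %d new descriptors" % leaked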
From commits-noreply at bitbucket.org  Mon Nov  7 19:28:37 2011
From: commits-noreply at bitbucket.org (Bitbucket)
Date: Mon, 07 Nov 2011 18:28:37 -0000
Subject: [py-svn] commit/pytest: hpk42: fix py3 failure
Message-ID: <20111107182837.31018.3738@bitbucket05.managed.contegix.com>

1 new commit in pytest:

https://bitbucket.org/hpk42/pytest/changeset/ed2419f69456/
changeset:   ed2419f69456
user:        hpk42
date:        2011-11-07 19:28:30
summary:     fix py3 failure
affected #:  1 file

diff -r 40ed9c90eed1b6ecfc0bfefc301bfcbf8562efb7 -r ed2419f694561257b1e59becf63ad241f203ecc6 _pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -360,7 +360,7 @@
         return items, rec[0]

     def parseconfig(self, *args):
-        args = map(str, args)
+        args = [str(x) for x in args]
         for x in args:
             if str(x).startswith('--basetemp'):
                 break

Repository URL: https://bitbucket.org/hpk42/pytest/

--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

From commits-noreply at bitbucket.org  Mon Nov  7 22:02:18 2011
From: commits-noreply at bitbucket.org (Bitbucket)
Date: Mon, 07 Nov 2011 21:02:18 -0000
Subject: [py-svn] commit/pytest: hpk42: use os.symlink to make things work on windows/py32
Message-ID: <20111107210218.17660.70250@bitbucket02.managed.contegix.com>

1 new commit in pytest:

https://bitbucket.org/hpk42/pytest/changeset/85bbda29aa07/
changeset:   85bbda29aa07
user:        hpk42
date:        2011-11-07 22:02:07
summary:     use os.symlink to make things work on windows/py32
affected #:  1 file

diff -r ed2419f694561257b1e59becf63ad241f203ecc6 -r 85bbda29aa07273d7091ab2c0a7ff9c3be505ae6 testing/test_tmpdir.py
--- a/testing/test_tmpdir.py
+++ b/testing/test_tmpdir.py
@@ -1,4 +1,5 @@
 import py, pytest
+import os
 from _pytest.tmpdir import pytest_funcarg__tmpdir, TempdirHandler
 from _pytest.python import FuncargRequest

@@ -69,7 +70,7 @@
 def test_tmpdir_keeps_symlinks(testdir):
     realtemp = testdir.tmpdir.mkdir("myrealtemp")
     linktemp = testdir.tmpdir.join("symlinktemp")
-    linktemp.mksymlinkto(realtemp)
+    os.symlink(str(realtemp), str(linktemp))
     p = testdir.makepyfile("""
         def test_1(tmpdir):
             import os

Repository URL: https://bitbucket.org/hpk42/pytest/

--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
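The "fix py3 failure" commit above is the usual Python 3 map() pitfall: on Python 3, map() returns a lazy iterator rather than a list, so code that later mutates the result (as Testdir.parseconfig does when it appends a --basetemp option) breaks. A small illustration of the underlying behaviour, independent of pytest and using made-up argument values::

    args = ("--verbose", 1)

    mapped = map(str, args)
    print(type(mapped).__name__)   # 'list' on Python 2, 'map' on Python 3

    # the committed fix builds a real list explicitly, which supports
    # append() on both interpreter lines
    args = [str(x) for x in args]
    args.append("--basetemp=/tmp/basetemp")
    print(args)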
From commits-noreply at bitbucket.org Mon Nov 7 23:00:21 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Mon, 07 Nov 2011 22:00:21 -0000 Subject: [py-svn] commit/pytest: hpk42: skip the symlink test on windows, win32/py32 does not support it without privs Message-ID: <20111107220021.17660.13483@bitbucket02.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/e29e0af293e6/ changeset: e29e0af293e6 user: hpk42 date: 2011-11-07 23:00:12 summary: skip the symlink test on windows, win32/py32 does not support it without privs affected #: 1 file diff -r 85bbda29aa07273d7091ab2c0a7ff9c3be505ae6 -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe testing/test_tmpdir.py --- a/testing/test_tmpdir.py +++ b/testing/test_tmpdir.py @@ -66,11 +66,11 @@ assert result.ret == 0 assert mytemp.join('hello').check() - at pytest.mark.skipif("not hasattr(os, 'symlink')") + at pytest.mark.skipif("not hasattr(py.path.local, 'mksymlinkto')") def test_tmpdir_keeps_symlinks(testdir): realtemp = testdir.tmpdir.mkdir("myrealtemp") linktemp = testdir.tmpdir.join("symlinktemp") - os.symlink(str(realtemp), str(linktemp)) + linktemp.mksymlinkto(realtemp) p = testdir.makepyfile(""" def test_1(tmpdir): import os Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Tue Nov 8 18:54:58 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 08 Nov 2011 17:54:58 -0000 Subject: [py-svn] commit/pytest: 2 new changesets Message-ID: <20111108175458.9657.69363@bitbucket03.managed.contegix.com> 2 new commits in pytest: https://bitbucket.org/hpk42/pytest/changeset/c6cba3e09301/ changeset: c6cba3e09301 user: hpk42 date: 2011-11-08 18:20:56 summary: introduce --durations=N showing slowest test executions affected #: 10 files diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,8 @@ Changes between 2.1.3 and [next version] ---------------------------------------- +- new feature to help optimizing your tests: --durations=N option for + displaying N slowest test calls - fix and cleanup pytest's own test suite to not leak FDs - fix issue83: link to generated funcarg list - fix issue74: pyarg module names are now checked against imp.find_module false positives diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.1.4.dev3' +__version__ = '2.2.0.dev1' diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 _pytest/pytester.py --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -549,10 +549,17 @@ def getreports(self, names="pytest_runtest_logreport pytest_collectreport"): return [x.report for x in self.getcalls(names)] - def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport", when=None): + def matchreport(self, inamepart="", + names="pytest_runtest_logreport pytest_collectreport", when=None): """ return a testreport whose dotted import path matches """ l = [] for rep in self.getreports(names=names): + try: + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + except AttributeError: + pass if when and getattr(rep, 'when', None) != 
when: continue if not inamepart or inamepart in rep.nodeid.split("::"): diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -14,6 +14,38 @@ # # pytest plugin hooks +def pytest_addoption(parser): + group = parser.getgroup("terminal reporting", "reporting", after="general") + group.addoption('--durations', + action="store", type="int", dest="durations", default=None, metavar="N", + help="show N slowest setup/test durations (N=0 for all)."), + +def pytest_terminal_summary(terminalreporter): + durations = terminalreporter.config.option.durations + if durations is None: + return + tr = terminalreporter + duration2rep = {} + for key, replist in tr.stats.items(): + if key == "deselected": + continue + for rep in replist: + duration2rep[rep.duration] = rep + if not duration2rep: + return + d2 = duration2rep.items() + d2.sort() + d2.reverse() + if not durations: + tr.write_sep("=", "slowest test durations") + else: + tr.write_sep("=", "slowest %s test durations" % durations) + d2 = d2[:durations] + + for duration, rep in d2: + nodeid = rep.nodeid.replace("::()::", "::") + tr.write_line("%2.2f %s %s" % (duration, rep.when, nodeid)) + def pytest_sessionstart(session): session._setupstate = SetupState() @@ -185,13 +217,13 @@ #: a name -> value dictionary containing all keywords and #: markers associated with a test invocation. self.keywords = keywords - + #: test outcome, always one of "passed", "failed", "skipped". self.outcome = outcome #: None or a failure representation. self.longrepr = longrepr - + #: one of 'setup', 'call', 'teardown' to indicate runtest phase. self.when = when diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 doc/announce/release-2.2.0.txt --- /dev/null +++ b/doc/announce/release-2.2.0.txt @@ -0,0 +1,21 @@ +py.test 2.2.0: new test duration profiling and bug fixes +=========================================================================== + +pytest-2.2.0 is a backward compatible release of the popular +py.test testing tool. It introduces the new "--duration=N" option +showing the N slowest test execution or setup/teardown calls. The +release also contains a few fixes and some cleanup of pytest's own test +suite allowing it to run on a wider range of environments. + +For general information, see extensive docs with examples here: + + http://pytest.org/ + +If you want to install or upgrade pytest you might just type:: + + pip install -U pytest # or + easy_install -U pytest + +best, +holger krekel + diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 doc/example/simple.txt --- a/doc/example/simple.txt +++ b/doc/example/simple.txt @@ -299,3 +299,30 @@ collecting ... collected 0 items ============================= in 0.00 seconds ============================= + +profiling test duration +-------------------------- + +.. regendoc:wipe + +.. versionadded: 2.2 + +If you have a slow running large test suite you might want to find +out which tests are slowest. 
Let's make an artifical test suite:: + + # content of test_some_are_slow.py + + import time + + def test_funcfast(): + pass + + def test_funcslow1(): + time.sleep(0.1) + + def test_funcslow2(): + time.sleep(0.2) + +Now we can profile which test functions execute slowest:: + + $ py.test --durations=3 diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 doc/index.txt --- a/doc/index.txt +++ b/doc/index.txt @@ -25,6 +25,7 @@ - **supports functional testing and complex test setups** + - (new in 2.2) :ref:`durations` - advanced :ref:`skip and xfail` - generic :ref:`marking and test selection ` - can :ref:`distribute tests to multiple CPUs ` through :ref:`xdist plugin ` diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 doc/usage.txt --- a/doc/usage.txt +++ b/doc/usage.txt @@ -98,6 +98,18 @@ In previous versions you could only enter PDB tracing if you disable capturing on the command line via ``py.test -s``. +.. _durations: + +Profiling test execution duration +------------------------------------- + +.. versionadded: 2.2 + +To get a list of the slowest 10 test durations:: + + py.test --durations=10 + + Creating JUnitXML format files ---------------------------------------------------- diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.1.4.dev3', + version='2.2.0.dev1', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 testing/acceptance_test.py --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -465,3 +465,48 @@ "*1 failed*", ]) +def test_duration_test(testdir): + testdir.makepyfile(""" + import time + frag = 0.01 + def test_2(): + time.sleep(frag*2) + def test_1(): + time.sleep(frag) + def test_3(): + time.sleep(frag*3) + """) + result = testdir.runpytest("--durations=10") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*durations*", + "*call*test_3*", + "*call*test_2*", + "*call*test_1*", + ]) + result = testdir.runpytest("--durations=2") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*durations*", + "*call*test_3*", + "*call*test_2*", + ]) + assert "test_1" not in result.stdout.str() + result = testdir.runpytest("--durations=0") + assert result.ret == 0 + for x in "123": + for y in 'call',: #'setup', 'call', 'teardown': + l = [] + for line in result.stdout.lines: + if ("test_%s" % x) in line and y in line: + break + else: + raise AssertionError("not found %s %s" % (x,y)) + + result = testdir.runpytest("--durations=2", "-k test_1") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*durations*", + "*call*test_1*", + ]) + https://bitbucket.org/hpk42/pytest/changeset/70a660cea75e/ changeset: 70a660cea75e user: hpk42 date: 2011-11-08 18:53:46 summary: make --durations also show the execution times of setup/teardown calls. This requires a slight incompatibility - pytest_runtest_logreport now sees setup/teardown reports even if the tests passed. 
affected #: 12 files diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -1,8 +1,8 @@ -Changes between 2.1.3 and [next version] +Changes between 2.1.3 and XXX 2.2.0 ---------------------------------------- - new feature to help optimizing your tests: --durations=N option for - displaying N slowest test calls + displaying N slowest test calls and setup/teardown methods. - fix and cleanup pytest's own test suite to not leak FDs - fix issue83: link to generated funcarg list - fix issue74: pyarg module names are now checked against imp.find_module false positives diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev1' +__version__ = '2.2.0.dev2' diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/hookspec.py --- a/_pytest/hookspec.py +++ b/_pytest/hookspec.py @@ -149,7 +149,8 @@ pytest_runtest_makereport.firstresult = True def pytest_runtest_logreport(report): - """ process item test report. """ + """ process a test setup/call/teardown report relating to + the respective phase of executing a test. """ # special handling for final teardown - somewhat internal for now def pytest__teardown_final(session): diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/junitxml.py --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -166,7 +166,8 @@ def pytest_runtest_logreport(self, report): if report.passed: - self.append_pass(report) + if report.when == "call": # ignore setup/teardown + self.append_pass(report) elif report.failed: if report.when != "call": self.append_error(report) diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/pytester.py --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -334,8 +334,8 @@ l = list(args) + [p] reprec = self.inline_run(*l) reports = reprec.getreports("pytest_runtest_logreport") - assert len(reports) == 1, reports - return reports[0] + assert len(reports) == 3, reports # setup/call/teardown + return reports[1] def inline_genitems(self, *args): return self.inprocess_run(list(args) + ['--collectonly']) diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -63,6 +63,8 @@ self.write_log_entry(testpath, lettercode, longrepr) def pytest_runtest_logreport(self, report): + if report.when != "call" and report.passed: + return res = self.config.hook.pytest_report_teststatus(report=report) code = res[1] if code == 'x': diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -26,25 +26,36 @@ return tr = terminalreporter duration2rep = {} + alldurations = 0.0 for key, replist in tr.stats.items(): if key == "deselected": continue for rep in replist: duration2rep[rep.duration] = rep + alldurations += rep.duration if not duration2rep: return - d2 = duration2rep.items() + d2 = remaining = duration2rep.items() d2.sort() d2.reverse() if not durations: tr.write_sep("=", "slowest test durations") else: tr.write_sep("=", "slowest %s test durations" % durations) + remaining = d2[durations:] d2 = d2[:durations] + assert (alldurations/100) > 0 for duration, rep in d2: nodeid = 
rep.nodeid.replace("::()::", "::") - tr.write_line("%2.2f %s %s" % (duration, rep.when, nodeid)) + percent = rep.duration / (alldurations / 100) + tr.write_line("%02.2fs %-02.2f%% %s %s" % + (duration, percent, rep.when, nodeid)) + if remaining: + remsum = sum(map(lambda x: x[0], remaining)) + tr.write_line("%02.2fs %-02.2f%% remaining in %d test phases" %( + remsum, remsum / (alldurations / 100), len(remaining))) + def pytest_sessionstart(session): session._setupstate = SetupState() @@ -110,7 +121,7 @@ call = call_runtest_hook(item, when) hook = item.ihook report = hook.pytest_runtest_makereport(item=item, call=call) - if log and (when == "call" or not report.passed): + if log: hook.pytest_runtest_logreport(report=report) return report diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/terminal.py --- a/_pytest/terminal.py +++ b/_pytest/terminal.py @@ -426,9 +426,10 @@ keys.append(key) parts = [] for key in keys: - val = self.stats.get(key, None) - if val: - parts.append("%d %s" %(len(val), key)) + if key: # setup/teardown reports have an empty key, ignore them + val = self.stats.get(key, None) + if val: + parts.append("%d %s" %(len(val), key)) line = ", ".join(parts) # XXX coloring msg = "%s in %.2f seconds" %(line, session_duration) diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 doc/announce/release-2.2.0.txt --- a/doc/announce/release-2.2.0.txt +++ b/doc/announce/release-2.2.0.txt @@ -1,7 +1,7 @@ py.test 2.2.0: new test duration profiling and bug fixes =========================================================================== -pytest-2.2.0 is a backward compatible release of the popular +pytest-2.2.0 is a quite backward compatible release of the popular py.test testing tool. It introduces the new "--duration=N" option showing the N slowest test execution or setup/teardown calls. The release also contains a few fixes and some cleanup of pytest's own test @@ -16,6 +16,19 @@ pip install -U pytest # or easy_install -U pytest +incompatible change +------------------------------------ + +* You need a new version of the pytest-xdist plugin (1.7) for distributing + test runs. + +* Also other plugins might need an upgrade if they implement + the ``pytest_runtest_logreport`` hook which now is called unconditionally + for the setup/teardown fixture phases of a test. You can just choose to + ignore them by inserting "if rep.when != 'call': return". Note that + most code probably "just" works because the hook was already called + for failing setup/teardown phases of a test. 
+ best, holger krekel diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev1', + version='2.2.0.dev2', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 testing/acceptance_test.py --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -484,12 +484,15 @@ "*call*test_2*", "*call*test_1*", ]) + assert "remaining in" not in result.stdout.str() + result = testdir.runpytest("--durations=2") assert result.ret == 0 result.stdout.fnmatch_lines([ "*durations*", "*call*test_3*", "*call*test_2*", + "*s*%*remaining in 7 test phases", ]) assert "test_1" not in result.stdout.str() result = testdir.runpytest("--durations=0") @@ -503,6 +506,28 @@ else: raise AssertionError("not found %s %s" % (x,y)) +def test_duration_test_with_fixture(testdir): + testdir.makepyfile(""" + import time + frag = 0.01 + def setup_function(func): + time.sleep(frag * 3) + + def test_1(): + time.sleep(frag*2) + def test_2(): + time.sleep(frag) + """) + result = testdir.runpytest("--durations=10") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*durations*", + "*setup*test_1*", + "*setup*test_2*", + "*call*test_1*", + "*call*test_2*", + ]) + result = testdir.runpytest("--durations=2", "-k test_1") assert result.ret == 0 result.stdout.fnmatch_lines([ diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 testing/test_mark.py --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -77,7 +77,7 @@ assert hasattr(test_hello, 'hello') """) result = testdir.runpytest(p) - result.stdout.fnmatch_lines(["*passed*"]) + result.stdout.fnmatch_lines(["*1 passed*"]) def test_mark_per_module(self, testdir): item = testdir.getitem(""" Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
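
The upgrade note above boils down to one guard inside a plugin's hook. A minimal sketch of what a third-party ``pytest_runtest_logreport`` implementation could look like after this change (the counting logic is invented for illustration; only the hook name and the ``report.when``/``report.passed`` attributes come from the diffs above)::

    # conftest.py of a hypothetical plugin
    passed_nodeids = []

    def pytest_runtest_logreport(report):
        # As of pytest-2.2.0 this hook also fires for the "setup" and
        # "teardown" phases.  The announcement's simple guard would be
        # "if report.when != 'call': return"; the variant below (the one
        # the bundled resultlog plugin uses in the diff) only skips
        # *passed* setup/teardown reports, so errors still get through.
        if report.when != "call" and report.passed:
            return
        if report.passed:
            passed_nodeids.append(report.nodeid)
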
From commits-noreply at bitbucket.org Tue Nov 8 18:55:22 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 08 Nov 2011 17:55:22 -0000 Subject: [py-svn] commit/pytest-xdist: hpk42: adapt to pytest-2.2.0.dev2 Message-ID: <20111108175522.9654.72459@bitbucket03.managed.contegix.com> 1 new commit in pytest-xdist: https://bitbucket.org/hpk42/pytest-xdist/changeset/017a73903b6f/ changeset: 017a73903b6f user: hpk42 date: 2011-11-08 18:54:26 summary: adapt to pytest-2.2.0.dev2 affected #: 8 files diff -r 52e15109500c7b43b244961b6060198207fb998f -r 017a73903b6f50f3ad201be5c8b7465111e5026c CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,8 @@ +1.7 +------------------------- + +- fix incompatibilities with pytest-2.2.0 + 1.6 ------------------------- diff -r 52e15109500c7b43b244961b6060198207fb998f -r 017a73903b6f50f3ad201be5c8b7465111e5026c setup.py --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ setup( name="pytest-xdist", - version='1.6', + version='1.7.dev1', description='py.test xdist plugin for distributed testing and loop-on-failing modes', long_description=open('README.txt').read(), license='GPLv2 or later', @@ -13,7 +13,7 @@ packages = ['xdist'], entry_points = {'pytest11': ['xdist = xdist.plugin'],}, zip_safe=False, - install_requires = ['execnet>=1.0.8', 'pytest>2.0.2'], + install_requires = ['execnet>=1.0.8', 'pytest>=2.2.0.dev2'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', @@ -27,4 +27,4 @@ 'Programming Language :: Python', 'Programming Language :: Python :: 3', ], -) \ No newline at end of file +) diff -r 52e15109500c7b43b244961b6060198207fb998f -r 017a73903b6f50f3ad201be5c8b7465111e5026c testing/test_boxed.py --- a/testing/test_boxed.py +++ b/testing/test_boxed.py @@ -16,13 +16,13 @@ class TestOptionEffects: def test_boxed_option_default(self, testdir): tmpdir = testdir.tmpdir.ensure("subdir", dir=1) - config = testdir.reparseconfig() + config = testdir.parseconfig() assert not config.option.boxed py.test.importorskip("execnet") - config = testdir.reparseconfig(['-d', tmpdir]) + config = testdir.parseconfig('-d', tmpdir) assert not config.option.boxed def test_is_not_boxed_by_default(self, testdir): - config = testdir.reparseconfig([testdir.tmpdir]) + config = testdir.parseconfig(testdir.tmpdir) assert not config.option.boxed diff -r 52e15109500c7b43b244961b6060198207fb998f -r 017a73903b6f50f3ad201be5c8b7465111e5026c testing/test_remote.py --- a/testing/test_remote.py +++ b/testing/test_remote.py @@ -80,7 +80,7 @@ py.test.xfail("hello") """) reports = reprec.getreports("pytest_runtest_logreport") - assert len(reports) == 6 + assert len(reports) == 17 # with setup/teardown "passed" reports for rep in reports: d = serialize_report(rep) check_marshallable(d) @@ -138,6 +138,7 @@ ids = ev.kwargs['ids'] assert len(ids) == 1 slave.sendcommand("runtests", ids=ids) + ev = slave.popevent("testreport") # setup ev = slave.popevent("testreport") assert ev.name == "testreport" rep = unserialize_report(ev.name, ev.kwargs['data']) @@ -194,15 +195,12 @@ ids = ev.kwargs['ids'] assert len(ids) == 2 slave.sendcommand("runtests_all", ) - ev = slave.popevent("testreport") - assert ev.name == "testreport" - rep = unserialize_report(ev.name, ev.kwargs['data']) - assert rep.nodeid.endswith("::test_func") - ev = slave.popevent("testreport") - assert ev.name == "testreport" - rep = unserialize_report(ev.name, ev.kwargs['data']) - assert rep.nodeid.endswith("::test_func2") - assert rep.passed + for func in "::test_func", "::test_func2": + for 
i in range(3): # setup/call/teardown + ev = slave.popevent("testreport") + assert ev.name == "testreport" + rep = unserialize_report(ev.name, ev.kwargs['data']) + assert rep.nodeid.endswith(func) slave.sendcommand("shutdown") ev = slave.popevent("slavefinished") assert 'slaveoutput' in ev.kwargs diff -r 52e15109500c7b43b244961b6060198207fb998f -r 017a73903b6f50f3ad201be5c8b7465111e5026c testing/test_slavemanage.py --- a/testing/test_slavemanage.py +++ b/testing/test_slavemanage.py @@ -135,7 +135,7 @@ @py.test.mark.xfail def test_rsync_roots_no_roots(self, testdir, mysetup): mysetup.source.ensure("dir1", "file1").write("hello") - config = testdir.reparseconfig([source]) + config = testdir.parseconfig(source) nodemanager = NodeManager(config, ["popen//chdir=%s" % mysetup.dest]) #assert nodemanager.config.topdir == source == config.topdir nodemanager.makegateways() @@ -179,7 +179,7 @@ [pytest] rsyncdirs=dir1/dir2 """)) - config = testdir.reparseconfig([source]) + config = testdir.parseconfig(source) nodemanager = NodeManager(config, ["popen//chdir=%s" % dest]) nodemanager.makegateways() nodemanager.rsync_roots() @@ -198,7 +198,7 @@ rsyncdirs = dir1 dir5 rsyncignore = dir1/dir2 dir5/dir6 """)) - config = testdir.reparseconfig([source]) + config = testdir.parseconfig(source) nodemanager = NodeManager(config, ["popen//chdir=%s" % dest]) nodemanager.makegateways() nodemanager.rsync_roots() @@ -212,7 +212,7 @@ specs = ["popen"] * 3 source.join("conftest.py").write("rsyncdirs = ['a']") source.ensure('a', dir=1) - config = testdir.reparseconfig([source]) + config = testdir.parseconfig(source) nodemanager = NodeManager(config, specs) nodemanager.makegateways() nodemanager.rsync_roots() diff -r 52e15109500c7b43b244961b6060198207fb998f -r 017a73903b6f50f3ad201be5c8b7465111e5026c tox.ini --- a/tox.ini +++ b/tox.ini @@ -6,7 +6,7 @@ [testenv] changedir=testing -deps=:testrun:pytest +deps=:testrun:pytest>=2.2.0.dev2 commands= py.test --junitxml={envlogdir}/junit-{envname}.xml [] [testenv:py31] diff -r 52e15109500c7b43b244961b6060198207fb998f -r 017a73903b6f50f3ad201be5c8b7465111e5026c xdist/__init__.py --- a/xdist/__init__.py +++ b/xdist/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '1.6' +__version__ = '1.7.dev1' diff -r 52e15109500c7b43b244961b6060198207fb998f -r 017a73903b6f50f3ad201be5c8b7465111e5026c xdist/dsession.py --- a/xdist/dsession.py +++ b/xdist/dsession.py @@ -271,7 +271,7 @@ self.terminal.write_line("") self.terminal.write_line("scheduling tests via %s" %( self.sched.__class__.__name__)) - + self.sched.init_distribute() def slave_logstart(self, node, nodeid, location): @@ -279,8 +279,9 @@ nodeid=nodeid, location=location) def slave_testreport(self, node, rep): - if rep.when in ("setup", "call"): - self.sched.remove_item(node, rep.nodeid) + if not (rep.passed and rep.when != "call"): + if rep.when in ("setup", "call"): + self.sched.remove_item(node, rep.nodeid) #self.report_line("testreport %s: %s" %(rep.id, rep.status)) rep.node = node self.config.hook.pytest_runtest_logreport(report=rep) Repository URL: https://bitbucket.org/hpk42/pytest-xdist/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
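
The changed report counts in the xdist tests above (17 instead of 6, and three popevent calls per test) all follow from each test now emitting a setup, a call and a teardown report. A small self-contained check of that behaviour, written against the same pytester API used in the diffs (the test body itself is made up)::

    def test_three_reports_per_test(testdir):
        p = testdir.makepyfile("def test_ok(): pass")
        reprec = testdir.inline_run(p)
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3                           # setup, call, teardown
        assert [r.when for r in reports] == ["setup", "call", "teardown"]
        assert all(r.passed for r in reports)
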
From commits-noreply at bitbucket.org Tue Nov 8 19:12:25 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 08 Nov 2011 18:12:25 -0000 Subject: [py-svn] commit/pytest: hpk42: separate out the duration tests Message-ID: <20111108181225.12591.65935@bitbucket13.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/bc7d4c092207/ changeset: bc7d4c092207 user: hpk42 date: 2011-11-08 19:12:16 summary: separate out the duration tests affected #: 1 file diff -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 -r bc7d4c092207173c09d504200eac61e9809e6d24 testing/acceptance_test.py --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -465,8 +465,8 @@ "*1 failed*", ]) -def test_duration_test(testdir): - testdir.makepyfile(""" +class TestDurations: + source = """ import time frag = 0.01 def test_2(): @@ -475,39 +475,57 @@ time.sleep(frag) def test_3(): time.sleep(frag*3) - """) - result = testdir.runpytest("--durations=10") - assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*durations*", - "*call*test_3*", - "*call*test_2*", - "*call*test_1*", - ]) - assert "remaining in" not in result.stdout.str() + """ - result = testdir.runpytest("--durations=2") - assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*durations*", - "*call*test_3*", - "*call*test_2*", - "*s*%*remaining in 7 test phases", - ]) - assert "test_1" not in result.stdout.str() - result = testdir.runpytest("--durations=0") - assert result.ret == 0 - for x in "123": - for y in 'call',: #'setup', 'call', 'teardown': - l = [] - for line in result.stdout.lines: - if ("test_%s" % x) in line and y in line: - break - else: - raise AssertionError("not found %s %s" % (x,y)) + def test_calls(self, testdir): + testdir.makepyfile(self.source) + result = testdir.runpytest("--durations=10") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*durations*", + "*call*test_3*", + "*call*test_2*", + "*call*test_1*", + ]) + assert "remaining in" not in result.stdout.str() -def test_duration_test_with_fixture(testdir): - testdir.makepyfile(""" + def test_calls_show_2(self, testdir): + testdir.makepyfile(self.source) + result = testdir.runpytest("--durations=2") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*durations*", + "*call*test_3*", + "*call*test_2*", + "*s*%*remaining in 7 test phases", + ]) + assert "test_1" not in result.stdout.str() + + def test_calls_showall(self, testdir): + testdir.makepyfile(self.source) + result = testdir.runpytest("--durations=0") + assert result.ret == 0 + for x in "123": + for y in 'call',: #'setup', 'call', 'teardown': + l = [] + for line in result.stdout.lines: + if ("test_%s" % x) in line and y in line: + break + else: + raise AssertionError("not found %s %s" % (x,y)) + + def test_with_deselected(self, testdir): + testdir.makepyfile(self.source) + result = testdir.runpytest("--durations=2", "-k test_1") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*durations*", + "*call*test_1*", + ]) + + +class TestDurationWithFixture: + source = """ import time frag = 0.01 def setup_function(func): @@ -517,21 +535,17 @@ time.sleep(frag*2) def test_2(): time.sleep(frag) - """) - result = testdir.runpytest("--durations=10") - assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*durations*", - "*setup*test_1*", - "*setup*test_2*", - "*call*test_1*", - "*call*test_2*", - ]) + """ + def test_setup_function(self, testdir): + testdir.makepyfile(self.source) + result = testdir.runpytest("--durations=10") + assert result.ret == 0 - 
result = testdir.runpytest("--durations=2", "-k test_1") - assert result.ret == 0 - result.stdout.fnmatch_lines([ - "*durations*", - "*call*test_1*", - ]) + result.stdout.fnmatch_lines([ + "*durations*", + "*setup*test_1*", + "*setup*test_2*", + "*call*test_1*", + "*call*test_2*", + ]) Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Tue Nov 8 19:38:59 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 08 Nov 2011 18:38:59 -0000 Subject: [py-svn] commit/pytest: hpk42: fix py3 compat Message-ID: <20111108183859.9658.85245@bitbucket03.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/a0dedb1ce96b/ changeset: a0dedb1ce96b user: hpk42 date: 2011-11-08 19:37:08 summary: fix py3 compat affected #: 3 files diff -r bc7d4c092207173c09d504200eac61e9809e6d24 -r a0dedb1ce96bd94b63bf97334914da22ec556b49 _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev2' +__version__ = '2.2.0.dev3' diff -r bc7d4c092207173c09d504200eac61e9809e6d24 -r a0dedb1ce96bd94b63bf97334914da22ec556b49 _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -35,9 +35,10 @@ alldurations += rep.duration if not duration2rep: return - d2 = remaining = duration2rep.items() + d2 = list(duration2rep.items()) d2.sort() d2.reverse() + remaining = [] if not durations: tr.write_sep("=", "slowest test durations") else: diff -r bc7d4c092207173c09d504200eac61e9809e6d24 -r a0dedb1ce96bd94b63bf97334914da22ec556b49 setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev2', + version='2.2.0.dev3', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
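
The "fix py3 compat" change above is needed because dict.items() returns a plain list on Python 2 but a view object on Python 3, and views have no sort() method; wrapping the result in list() works on both. A minimal illustration outside of pytest (values are made up)::

    durations = {0.06: "call test_a", 0.02: "setup test_a"}
    d2 = list(durations.items())   # works on Python 2 and 3
    d2.sort()
    d2.reverse()                   # slowest phase first
    # durations.items().sort() would raise AttributeError on Python 3
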
From commits-noreply at bitbucket.org Tue Nov 8 20:00:50 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 08 Nov 2011 19:00:50 -0000 Subject: [py-svn] commit/pytest: hpk42: fix duration option in case of collection errors Message-ID: <20111108190050.22121.94002@bitbucket12.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/ca6adfab10e9/ changeset: ca6adfab10e9 user: hpk42 date: 2011-11-08 20:00:25 summary: fix duration option in case of collection errors affected #: 4 files diff -r a0dedb1ce96bd94b63bf97334914da22ec556b49 -r ca6adfab10e932cf4c5fdcfbe161542f2b6e7339 _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev3' +__version__ = '2.2.0.dev4' diff -r a0dedb1ce96bd94b63bf97334914da22ec556b49 -r ca6adfab10e932cf4c5fdcfbe161542f2b6e7339 _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -28,11 +28,10 @@ duration2rep = {} alldurations = 0.0 for key, replist in tr.stats.items(): - if key == "deselected": - continue for rep in replist: - duration2rep[rep.duration] = rep - alldurations += rep.duration + if hasattr(rep, 'duration'): + duration2rep[rep.duration] = rep + alldurations += rep.duration if not duration2rep: return d2 = list(duration2rep.items()) diff -r a0dedb1ce96bd94b63bf97334914da22ec556b49 -r ca6adfab10e932cf4c5fdcfbe161542f2b6e7339 setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev3', + version='2.2.0.dev4', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r a0dedb1ce96bd94b63bf97334914da22ec556b49 -r ca6adfab10e932cf4c5fdcfbe161542f2b6e7339 testing/acceptance_test.py --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -523,6 +523,16 @@ "*call*test_1*", ]) + def test_with_failing_collection(self, testdir): + testdir.makepyfile(self.source) + testdir.makepyfile(test_collecterror="""xyz""") + result = testdir.runpytest("--durations=2", "-k test_1") + assert result.ret != 0 + result.stdout.fnmatch_lines([ + "*durations*", + "*call*test_1*", + ]) + class TestDurationWithFixture: source = """ Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
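
The new hasattr() check is what lets --durations survive a collection error: when a test module fails to import, terminalreporter.stats also holds collection reports (and, with -k, deselected entries), and as far as the diff shows those carry no duration attribute, unlike setup/call/teardown reports, which is also why the earlier special-casing of the "deselected" key could be dropped. A commented sketch of the filtering idea, following the diff::

    duration2rep = {}
    for key, replist in tr.stats.items():
        for rep in replist:
            # collection reports and deselected entries have no
            # 'duration'; only setup/call/teardown test reports do
            if hasattr(rep, 'duration'):
                duration2rep[rep.duration] = rep
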
From commits-noreply at bitbucket.org Tue Nov 8 22:04:35 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 08 Nov 2011 21:04:35 -0000 Subject: [py-svn] commit/pytest: hpk42: simplify durations output, no percentage, no "remaining" bits Message-ID: <20111108210435.6047.24469@bitbucket02.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/b8e508e8e8ba/ changeset: b8e508e8e8ba user: hpk42 date: 2011-11-08 21:57:19 summary: simplify durations output, no percentage, no "remaining" bits affected #: 4 files diff -r ca6adfab10e932cf4c5fdcfbe161542f2b6e7339 -r b8e508e8e8bacbe70d0061ca402009c4820afb1c _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev4' +__version__ = '2.2.0.dev5' diff -r ca6adfab10e932cf4c5fdcfbe161542f2b6e7339 -r b8e508e8e8bacbe70d0061ca402009c4820afb1c _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -26,35 +26,31 @@ return tr = terminalreporter duration2rep = {} - alldurations = 0.0 for key, replist in tr.stats.items(): for rep in replist: if hasattr(rep, 'duration'): duration2rep[rep.duration] = rep - alldurations += rep.duration if not duration2rep: return d2 = list(duration2rep.items()) d2.sort() d2.reverse() - remaining = [] + #remaining = [] if not durations: tr.write_sep("=", "slowest test durations") else: tr.write_sep("=", "slowest %s test durations" % durations) - remaining = d2[durations:] + #remaining = d2[durations:] d2 = d2[:durations] - assert (alldurations/100) > 0 for duration, rep in d2: nodeid = rep.nodeid.replace("::()::", "::") - percent = rep.duration / (alldurations / 100) - tr.write_line("%02.2fs %-02.2f%% %s %s" % - (duration, percent, rep.when, nodeid)) - if remaining: - remsum = sum(map(lambda x: x[0], remaining)) - tr.write_line("%02.2fs %-02.2f%% remaining in %d test phases" %( - remsum, remsum / (alldurations / 100), len(remaining))) + tr.write_line("%02.2fs %s %s" % + (duration, rep.when, nodeid)) + #if remaining: + # remsum = sum(map(lambda x: x[0], remaining)) + # tr.write_line("%02.2fs spent in %d remaining test phases" %( + # remsum, len(remaining))) def pytest_sessionstart(session): diff -r ca6adfab10e932cf4c5fdcfbe161542f2b6e7339 -r b8e508e8e8bacbe70d0061ca402009c4820afb1c setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev4', + version='2.2.0.dev5', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r ca6adfab10e932cf4c5fdcfbe161542f2b6e7339 -r b8e508e8e8bacbe70d0061ca402009c4820afb1c testing/acceptance_test.py --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -487,7 +487,6 @@ "*call*test_2*", "*call*test_1*", ]) - assert "remaining in" not in result.stdout.str() def test_calls_show_2(self, testdir): testdir.makepyfile(self.source) @@ -497,7 +496,6 @@ "*durations*", "*call*test_3*", "*call*test_2*", - "*s*%*remaining in 7 test phases", ]) assert "test_1" not in result.stdout.str() Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
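
With the percentage and "remaining" summary gone, each output line is simply the remaining format string applied to one report. Roughly, with made-up values::

    >>> "%02.2fs %s %s" % (0.06, "call", "test_server.py::test_send_http")
    '0.06s call test_server.py::test_send_http'
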
From commits-noreply at bitbucket.org Tue Nov 8 23:07:07 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 08 Nov 2011 22:07:07 -0000 Subject: [py-svn] commit/pytest: hpk42: avoid race condition in test, fix doc link Message-ID: <20111108220707.19198.43585@bitbucket03.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/73760c710e26/ changeset: 73760c710e26 user: hpk42 date: 2011-11-08 23:06:57 summary: avoid race condition in test, fix doc link affected #: 2 files diff -r b8e508e8e8bacbe70d0061ca402009c4820afb1c -r 73760c710e264c56056b9a93c9eeeefbad052e9d doc/announce/index.txt --- a/doc/announce/index.txt +++ b/doc/announce/index.txt @@ -5,6 +5,7 @@ .. toctree:: :maxdepth: 2 + release-2.2.0 release-2.1.3 release-2.1.2 release-2.1.1 diff -r b8e508e8e8bacbe70d0061ca402009c4820afb1c -r 73760c710e264c56056b9a93c9eeeefbad052e9d testing/acceptance_test.py --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -538,7 +538,6 @@ frag = 0.01 def setup_function(func): time.sleep(frag * 3) - def test_1(): time.sleep(frag*2) def test_2(): @@ -552,7 +551,6 @@ result.stdout.fnmatch_lines([ "*durations*", "*setup*test_1*", - "*setup*test_2*", "*call*test_1*", "*call*test_2*", ]) Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Wed Nov 9 00:04:41 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 08 Nov 2011 23:04:41 -0000 Subject: [py-svn] commit/pytest: hpk42: try to avoid timing/race condition Message-ID: <20111108230441.16663.78937@bitbucket05.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/6a24dc78293d/ changeset: 6a24dc78293d user: hpk42 date: 2011-11-09 00:04:31 summary: try to avoid timing/race condition affected #: 1 file diff -r 73760c710e264c56056b9a93c9eeeefbad052e9d -r 6a24dc78293dda5cd7ba556e4dc454bdb018eb4b testing/acceptance_test.py --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -468,7 +468,7 @@ class TestDurations: source = """ import time - frag = 0.01 + frag = 0.02 def test_2(): time.sleep(frag*2) def test_1(): @@ -550,8 +550,7 @@ result.stdout.fnmatch_lines([ "*durations*", - "*setup*test_1*", - "*call*test_1*", - "*call*test_2*", + "* setup *test_1*", + "* call *test_1*", ]) Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
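
Both timing-related tweaks above address the same flakiness: the ordering assertions only hold if the sleeps clearly dominate scheduling noise, and with frag = 0.01 the test phases differ by only 10 to 20 ms. A quick way to see the jitter involved (illustrative only, not part of the test suite)::

    import time
    start = time.time()
    time.sleep(0.01)
    elapsed = time.time() - start
    # 'elapsed' is typically a few milliseconds more than 0.01 because of
    # timer resolution and scheduler delays, i.e. the same order of
    # magnitude as the gaps the strict ordering assertions relied on
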
From commits-noreply at bitbucket.org Sat Nov 12 00:03:27 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 11 Nov 2011 23:03:27 -0000 Subject: [py-svn] commit/pytest: 6 new changesets Message-ID: <20111111230327.30005.98188@bitbucket12.managed.contegix.com> 6 new commits in pytest: https://bitbucket.org/hpk42/pytest/changeset/298fbbd8c125/ changeset: 298fbbd8c125 user: hpk42 date: 2011-11-09 12:04:37 summary: fix formatting affected #: 1 file diff -r 6a24dc78293dda5cd7ba556e4dc454bdb018eb4b -r 298fbbd8c125c5ca8ac4808f13f718f0d03965c6 _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -17,7 +17,7 @@ def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "reporting", after="general") group.addoption('--durations', - action="store", type="int", dest="durations", default=None, metavar="N", + action="store", type="int", default=None, metavar="N", help="show N slowest setup/test durations (N=0 for all)."), def pytest_terminal_summary(terminalreporter): @@ -35,23 +35,16 @@ d2 = list(duration2rep.items()) d2.sort() d2.reverse() - #remaining = [] if not durations: tr.write_sep("=", "slowest test durations") else: tr.write_sep("=", "slowest %s test durations" % durations) - #remaining = d2[durations:] d2 = d2[:durations] for duration, rep in d2: nodeid = rep.nodeid.replace("::()::", "::") tr.write_line("%02.2fs %s %s" % (duration, rep.when, nodeid)) - #if remaining: - # remsum = sum(map(lambda x: x[0], remaining)) - # tr.write_line("%02.2fs spent in %d remaining test phases" %( - # remsum, len(remaining))) - def pytest_sessionstart(session): session._setupstate = SetupState() https://bitbucket.org/hpk42/pytest/changeset/40c7458fb0c8/ changeset: 40c7458fb0c8 user: hpk42 date: 2011-11-11 22:33:45 summary: skip pexpect tests on darwin affected #: 1 file diff -r 298fbbd8c125c5ca8ac4808f13f718f0d03965c6 -r 40c7458fb0c8dc5d0bf4e90e36133716fe0afe33 _pytest/pytester.py --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -516,6 +516,8 @@ pexpect = py.test.importorskip("pexpect", "2.4") if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine(): pytest.skip("pypy-64 bit not supported") + if sys.platform == "darwin": + pytest.xfail("pexpect does not work reliably on darwin?!") logfile = self.tmpdir.join("spawn.out") child = pexpect.spawn(cmd, logfile=logfile.open("w")) child.timeout = expect_timeout https://bitbucket.org/hpk42/pytest/changeset/7535fc2cb387/ changeset: 7535fc2cb387 user: hpk42 date: 2011-11-11 23:56:06 summary: improve mark.txt document and add new regristration/markers features. (welcome to documentation driven development) affected #: 4 files diff -r 40c7458fb0c8dc5d0bf4e90e36133716fe0afe33 -r 7535fc2cb387702e00924bad69681072a270f48a doc/example/index.txt --- a/doc/example/index.txt +++ b/doc/example/index.txt @@ -18,5 +18,6 @@ simple.txt mysetup.txt parametrize.txt + markers.txt pythoncollection.txt nonpython.txt diff -r 40c7458fb0c8dc5d0bf4e90e36133716fe0afe33 -r 7535fc2cb387702e00924bad69681072a270f48a doc/example/markers.txt --- /dev/null +++ b/doc/example/markers.txt @@ -0,0 +1,83 @@ + +Working with custom markers +================================================= + + +Here are some example using the :ref:`mark` mechanism. + +.. _`adding a custom marker from a plugin`: + +custom marker and command line option to control test runs +---------------------------------------------------------- + +Plugins can provide custom markers and implement specific behaviour +based on it. 
This is a self-contained example which adds a command +line option and a parametrized test function marker to run tests +specifies via named environments:: + + # content of conftest.py + + import pytest + def pytest_addoption(parser): + parser.addoption("-E", dest="env", action="store", metavar="NAME", + help="only run tests matching the environment NAME.") + + def pytest_configure(config): + # register an additional marker + config.addinivalue_line("markers", + "env(name): mark test to run only on named environment") + + def pytest_runtest_setup(item): + if not isinstance(item, item.Function): + return + if hasattr(item.obj, 'env'): + envmarker = getattr(item.obj, 'env') + envname = envmarker.args[0] + if envname != item.config.option.env: + pytest.skip("test requires env %r" % envname) + +A test file using this local plugin:: + + # content of test_someenv.py + + import pytest + @pytest.mark.env("stage1") + def test_basic_db_operation(): + pass + +and an example invocations specifying a different environment than what +the test needs:: + + $ py.test -E stage2 + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 + collecting ... collected 1 items + + test_someenv.py s + + ========================== 1 skipped in 0.02 seconds =========================== + +and here is one that specifies exactly the environment needed:: + + $ py.test -E stage1 + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 + collecting ... collected 1 items + + test_someenv.py . + + =========================== 1 passed in 0.02 seconds =========================== + +The ``--markers`` option always gives you a list of available markers:: + + $ py.test --markers + @pytest.mark.env(name): mark test to run only on named environment + + @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. + + @pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied. + + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. + + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. + diff -r 40c7458fb0c8dc5d0bf4e90e36133716fe0afe33 -r 7535fc2cb387702e00924bad69681072a270f48a doc/mark.txt --- a/doc/mark.txt +++ b/doc/mark.txt @@ -6,37 +6,71 @@ .. currentmodule:: _pytest.mark -By using the ``pytest.mark`` helper you can instantiate -decorators that will set named metadata on test functions. +By using the ``pytest.mark`` helper you can easily set +metadata on your test functions. To begin with, there are +some builtin markers, for example: -Marking a single function +* skipif - skip a test function if a certain condition is met +* xfail - produce an "expected failure" outcome if a certain + condition is met + +It's also easy to create custom markers or to apply markers +to whole test classes or modules. 
+ +marking test functions and selecting them for a run ---------------------------------------------------- -You can "mark" a test function with metadata like this:: +You can "mark" a test function with custom metadata like this:: + + # content of test_server.py import pytest @pytest.mark.webtest def test_send_http(): - ... + pass # perform some webtest test for your app -This will set the function attribute ``webtest`` to a :py:class:`MarkInfo` -instance. You can also specify parametrized metadata like this:: +.. versionadded:: 2.2 - # content of test_mark.py +You can restrict a test run only tests marked with ``webtest`` like this:: - import pytest - @pytest.mark.webtest(firefox=30) - def test_receive(): - pass + $ py.test -m webtest - @pytest.mark.webtest("functional", firefox=30) - def test_run_and_look(): - pass +Or the inverse, running all tests except the webtest ones:: + + $ py.test -m "not webtest" -and access it from other places like this:: +Registering markers +------------------------------------- - test_receive.webtest.kwargs['firefox'] == 30 - test_run_and_look.webtest.args[0] == "functional" +.. versionadded:: 2.2 + +.. ini-syntax for custom markers: + +Registering markers for your test suite is simple:: + + # content of pytest.ini + [pytest] + markers = + webtest: mark a test as a webtest. + +You can ask which markers exist for your test suite:: + + $ py.test --markers + +For an example on how to add and work markers from a plugin, see +:ref:`adding a custom marker from a plugin`. + +.. note:: + + It is recommended to explicitely register markers so that: + + * there is one place in your test suite defining your markers + + * asking for existing markers via ``py.test --markers`` gives good output + + * typos in function markers can be treated as an error if you use + the :ref:`--strict` option. Later versions of py.test might treat + non-registered markers as an error by default. .. _`scoped-marking`: @@ -58,7 +92,7 @@ This is equivalent to directly applying the decorator to the two test functions. -To remain compatible with Python2.5 you can also set a +To remain backward-compatible with Python2.4 you can also set a ``pytestmark`` attribute on a TestClass like this:: import pytest diff -r 40c7458fb0c8dc5d0bf4e90e36133716fe0afe33 -r 7535fc2cb387702e00924bad69681072a270f48a doc/plugins.txt --- a/doc/plugins.txt +++ b/doc/plugins.txt @@ -327,7 +327,6 @@ .. autofunction: pytest_runtest_logreport - Reference of important objects involved in hooks =========================================================== https://bitbucket.org/hpk42/pytest/changeset/37de67a8bea0/ changeset: 37de67a8bea0 user: hpk42 date: 2011-11-11 23:56:08 summary: add a method to the config object to dynamically add a value to an (line-type) ini-value affected #: 2 files diff -r 7535fc2cb387702e00924bad69681072a270f48a -r 37de67a8bea01ff1de584d6d2bd813b53d412817 _pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -83,6 +83,7 @@ self._inidict[name] = (help, type, default) self._ininames.append(name) + class OptionGroup: def __init__(self, name, description="", parser=None): self.name = name @@ -346,6 +347,14 @@ args.append(py.std.os.getcwd()) self.args = args + def addinivalue_line(self, name, line): + """ add a line to an ini-file option. The option must have been + declared but might not yet be set in which case the line becomes the + the first line in its value. 
""" + x = self.getini(name) + assert isinstance(x, list) + x.append(line) # modifies the cached list inline + def getini(self, name): """ return configuration value from an ini file. If the specified name hasn't been registered through a prior ``parse.addini`` diff -r 7535fc2cb387702e00924bad69681072a270f48a -r 37de67a8bea01ff1de584d6d2bd813b53d412817 testing/test_config.py --- a/testing/test_config.py +++ b/testing/test_config.py @@ -208,6 +208,40 @@ l = config.getini("a2") assert l == [] + def test_addinivalue_line_existing(self, testdir): + testdir.makeconftest(""" + def pytest_addoption(parser): + parser.addini("xy", "", type="linelist") + """) + p = testdir.makeini(""" + [pytest] + xy= 123 + """) + config = testdir.parseconfig() + l = config.getini("xy") + assert len(l) == 1 + assert l == ["123"] + config.addinivalue_line("xy", "456") + l = config.getini("xy") + assert len(l) == 2 + assert l == ["123", "456"] + + def test_addinivalue_line_new(self, testdir): + testdir.makeconftest(""" + def pytest_addoption(parser): + parser.addini("xy", "", type="linelist") + """) + config = testdir.parseconfig() + assert not config.getini("xy") + config.addinivalue_line("xy", "456") + l = config.getini("xy") + assert len(l) == 1 + assert l == ["456"] + config.addinivalue_line("xy", "123") + l = config.getini("xy") + assert len(l) == 2 + assert l == ["456", "123"] + def test_options_on_small_file_do_not_blow_up(testdir): def runfiletest(opts): reprec = testdir.inline_run(*opts) https://bitbucket.org/hpk42/pytest/changeset/96a571acab65/ changeset: 96a571acab65 user: hpk42 date: 2011-11-11 23:56:11 summary: add ini-file "markers" option and a cmdline option "--markers" to show defined markers. Add "skipif", "xfail" etc. to the set of builtin markers shown with the --markers option. affected #: 12 files diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -1,8 +1,18 @@ Changes between 2.1.3 and XXX 2.2.0 ---------------------------------------- -- new feature to help optimizing your tests: --durations=N option for - displaying N slowest test calls and setup/teardown methods. +- introduce registration for "pytest.mark.*" helpers via ini-files + or through plugin hooks. Also introduce a "--strict" option which + will treat unregistered markers as errors + allowing to avoid typos and maintain a well described set of markers + for your test suite. See exaples at http://pytest.org/latest/mark.html + and its links. +- XXX introduce "-m marker" option to select tests based on markers + (this is a stricter more predictable version of '-k' which also matches + substrings and compares against the test function name etc.) +- new feature to help optimizing the speed of your tests: + --durations=N option for displaying N slowest test calls + and setup/teardown methods. 
- fix and cleanup pytest's own test suite to not leak FDs - fix issue83: link to generated funcarg list - fix issue74: pyarg module names are now checked against imp.find_module false positives diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev5' +__version__ = '2.2.0.dev6' diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c _pytest/core.py --- a/_pytest/core.py +++ b/_pytest/core.py @@ -211,6 +211,14 @@ self.register(mod, modname) self.consider_module(mod) + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + def pytest_plugin_registered(self, plugin): import pytest dic = self.call_plugin(plugin, "pytest_namespace", {}) or {} diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c _pytest/main.py --- a/_pytest/main.py +++ b/_pytest/main.py @@ -29,6 +29,9 @@ action="store", type="int", dest="maxfail", default=0, help="exit after first num failures or errors.") + group._addoption('--strict', action="store_true", + help="run pytest in strict mode, warnings become errors.") + group = parser.getgroup("collect", "collection") group.addoption('--collectonly', action="store_true", dest="collectonly", diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c _pytest/mark.py --- a/_pytest/mark.py +++ b/_pytest/mark.py @@ -14,6 +14,24 @@ "Terminate expression with ':' to make the first match match " "all subsequent tests (usually file-order). ") + group.addoption("--markers", action="store_true", help= + "show markers (builtin, plugin and per-project ones).") + + parser.addini("markers", "markers for test functions", 'linelist') + +def pytest_cmdline_main(config): + if config.option.markers: + config.pluginmanager.do_configure(config) + tw = py.io.TerminalWriter() + for line in config.getini("markers"): + name, rest = line.split(":", 1) + tw.write("@pytest.mark.%s:" % name, bold=True) + tw.line(rest) + tw.line() + config.pluginmanager.do_unconfigure(config) + return 0 +pytest_cmdline_main.tryfirst = True + def pytest_collection_modifyitems(items, config): keywordexpr = config.option.keyword if not keywordexpr: @@ -37,13 +55,17 @@ config.hook.pytest_deselected(items=deselected) items[:] = remaining +def pytest_configure(config): + if config.option.strict: + pytest.mark._config = config + def skipbykeyword(colitem, keywordexpr): """ return True if they given keyword expression means to skip this collector/item. """ if not keywordexpr: return - + itemkeywords = getkeywords(colitem) for key in filter(None, keywordexpr.split()): eor = key[:1] == '-' @@ -77,15 +99,31 @@ @py.test.mark.slowtest def test_function(): pass - + will set a 'slowtest' :class:`MarkInfo` object on the ``test_function`` object. 
""" def __getattr__(self, name): if name[0] == "_": raise AttributeError(name) + if hasattr(self, '_config'): + self._check(name) return MarkDecorator(name) + def _check(self, name): + try: + if name in self._markers: + return + except AttributeError: + pass + self._markers = l = set() + for line in self._config.getini("markers"): + beginning = line.split(":", 1) + x = beginning[0].split("(", 1)[0] + l.add(x) + if name not in self._markers: + raise AttributeError("%r not a registered marker" % (name,)) + class MarkDecorator: """ A decorator for test functions and test classes. When applied it will create :class:`MarkInfo` objects which may be diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c _pytest/skipping.py --- a/_pytest/skipping.py +++ b/_pytest/skipping.py @@ -9,6 +9,21 @@ action="store_true", dest="runxfail", default=False, help="run tests even if they are marked xfail") +def pytest_configure(config): + config.addinivalue_line("markers", + "skipif(*conditions): skip the given test function if evaluation " + "of all conditions has a True value. Evaluation happens within the " + "module global context. Example: skipif('sys.platform == \"win32\"') " + "skips the test if we are on the win32 platform. " + ) + config.addinivalue_line("markers", + "xfail(*conditions, reason=None, run=True): mark the the test function " + "as an expected failure. Optionally specify a reason and run=False " + "if you don't even want to execute the test function. Any positional " + "condition strings will be evaluated (like with skipif) and if one is " + "False the marker will not be applied." + ) + def pytest_namespace(): return dict(xfail=xfail) diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c doc/mark.txt --- a/doc/mark.txt +++ b/doc/mark.txt @@ -68,9 +68,9 @@ * asking for existing markers via ``py.test --markers`` gives good output - * typos in function markers can be treated as an error if you use - the :ref:`--strict` option. Later versions of py.test might treat - non-registered markers as an error by default. + * typos in function markers are treated as an error if you use + the ``--strict`` option. Later versions of py.test are probably + going to treat non-registered markers as an error. .. _`scoped-marking`: diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev5', + version='2.2.0.dev6', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c testing/conftest.py --- a/testing/conftest.py +++ b/testing/conftest.py @@ -12,6 +12,10 @@ help=("run FD checks if lsof is available")) def pytest_configure(config): + config.addinivalue_line("markers", + "multi(arg=[value1,value2, ...]): call the test function " + "multiple times with arg=value1, then with arg=value2, ... 
" + ) if config.getvalue("lsof"): try: out = py.process.cmdexec("lsof -p %d" % pid) diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c testing/test_core.py --- a/testing/test_core.py +++ b/testing/test_core.py @@ -644,3 +644,10 @@ assert "1" in tags assert "2" in tags assert args == (42,) + +def test_default_markers(testdir): + result = testdir.runpytest("--markers") + result.stdout.fnmatch_lines([ + "*tryfirst*first*", + "*trylast*last*", + ]) diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c testing/test_mark.py --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -68,7 +68,54 @@ assert 'reason' not in g.some.kwargs assert g.some.kwargs['reason2'] == "456" + +def test_ini_markers(testdir): + testdir.makeini(""" + [pytest] + markers = + a1: this is a webtest marker + a2: this is a smoke marker + """) + testdir.makepyfile(""" + def test_markers(pytestconfig): + markers = pytestconfig.getini("markers") + print (markers) + assert len(markers) >= 2 + assert markers[0].startswith("a1:") + assert markers[1].startswith("a2:") + """) + rec = testdir.inline_run() + rec.assertoutcome(passed=1) + +def test_markers_option(testdir): + testdir.makeini(""" + [pytest] + markers = + a1: this is a webtest marker + a1some: another marker + """) + result = testdir.runpytest("--markers", ) + result.stdout.fnmatch_lines([ + "*a1*this is a webtest*", + "*a1some*another marker", + ]) + + +def test_strict_prohibits_unregistered_markers(testdir): + testdir.makepyfile(""" + import pytest + @pytest.mark.unregisteredmark + def test_hello(): + pass + """) + result = testdir.runpytest("--strict") + assert result.ret != 0 + result.stdout.fnmatch_lines([ + "*unregisteredmark*not*registered*", + ]) + class TestFunctional: + def test_mark_per_function(self, testdir): p = testdir.makepyfile(""" import pytest diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c testing/test_skipping.py --- a/testing/test_skipping.py +++ b/testing/test_skipping.py @@ -549,3 +549,10 @@ ]) +def test_default_markers(testdir): + result = testdir.runpytest("--markers") + result.stdout.fnmatch_lines([ + "*skipif(*conditions)*skip*", + "*xfail(*conditions, reason=None, run=True)*expected failure*", + ]) + https://bitbucket.org/hpk42/pytest/changeset/d8cce78bee2b/ changeset: d8cce78bee2b user: hpk42 date: 2011-11-12 00:02:06 summary: introduce a new -m mark_expression option affected #: 9 files diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -7,9 +7,10 @@ allowing to avoid typos and maintain a well described set of markers for your test suite. See exaples at http://pytest.org/latest/mark.html and its links. -- XXX introduce "-m marker" option to select tests based on markers - (this is a stricter more predictable version of '-k' which also matches - substrings and compares against the test function name etc.) +- introduce "-m marker" option to select tests based on markers + (this is a stricter and more predictable version of '-k' in that + "-m" only matches complete markers and has more obvious rules + for and/or semantics. - new feature to help optimizing the speed of your tests: --durations=N option for displaying N slowest test calls and setup/teardown methods. 
diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev6' +__version__ = '2.2.0.dev7' diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b _pytest/mark.py --- a/_pytest/mark.py +++ b/_pytest/mark.py @@ -14,6 +14,12 @@ "Terminate expression with ':' to make the first match match " "all subsequent tests (usually file-order). ") + group._addoption("-m", + action="store", dest="markexpr", default="", metavar="MARKEXPR", + help="only run tests which match given mark expression. " + "An expression is a python expression which can use " + "marker names.") + group.addoption("--markers", action="store_true", help= "show markers (builtin, plugin and per-project ones).") @@ -34,10 +40,11 @@ def pytest_collection_modifyitems(items, config): keywordexpr = config.option.keyword - if not keywordexpr: + matchexpr = config.option.markexpr + if not keywordexpr and not matchexpr: return selectuntil = False - if keywordexpr[-1] == ":": + if keywordexpr[-1:] == ":": selectuntil = True keywordexpr = keywordexpr[:-1] @@ -47,14 +54,27 @@ if keywordexpr and skipbykeyword(colitem, keywordexpr): deselected.append(colitem) else: - remaining.append(colitem) if selectuntil: keywordexpr = None + if matchexpr: + if not matchmark(colitem, matchexpr): + deselected.append(colitem) + continue + remaining.append(colitem) if deselected: config.hook.pytest_deselected(items=deselected) items[:] = remaining +class BoolDict: + def __init__(self, mydict): + self._mydict = mydict + def __getitem__(self, name): + return name in self._mydict + +def matchmark(colitem, matchexpr): + return eval(matchexpr, {}, BoolDict(colitem.obj.__dict__)) + def pytest_configure(config): if config.option.strict: pytest.mark._config = config diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b _pytest/terminal.py --- a/_pytest/terminal.py +++ b/_pytest/terminal.py @@ -440,8 +440,15 @@ def summary_deselected(self): if 'deselected' in self.stats: + l = [] + k = self.config.option.keyword + if k: + l.append("-k%s" % k) + m = self.config.option.markexpr + if m: + l.append("-m %r" % m) self.write_sep("=", "%d tests deselected by %r" %( - len(self.stats['deselected']), self.config.option.keyword), bold=True) + len(self.stats['deselected']), " ".join(l)), bold=True) def repr_pythonversion(v=None): if v is None: diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b doc/Makefile --- a/doc/Makefile +++ b/doc/Makefile @@ -40,7 +40,7 @@ -rm -rf $(BUILDDIR)/* install: html - @rsync -avz _build/html/ pytest.org:/www/pytest.org/latest + @rsync -avz _build/html/ pytest.org:/www/pytest.org/2.2.0.dev7 installpdf: latexpdf @scp $(BUILDDIR)/latex/pytest.pdf pytest.org:/www/pytest.org/latest diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b doc/index.txt --- a/doc/index.txt +++ b/doc/index.txt @@ -26,8 +26,8 @@ - **supports functional testing and complex test setups** - (new in 2.2) :ref:`durations` + - (much improved in 2.2) :ref:`marking and test selection ` - advanced :ref:`skip and xfail` - - generic :ref:`marking and test selection ` - can :ref:`distribute tests to multiple CPUs ` through :ref:`xdist plugin ` - can :ref:`continuously re-run failing tests ` - many :ref:`builtin helpers ` diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r 
d8cce78bee2bc2b26de9a2890ec20239cc425d5b doc/mark.txt --- a/doc/mark.txt +++ b/doc/mark.txt @@ -28,16 +28,34 @@ @pytest.mark.webtest def test_send_http(): pass # perform some webtest test for your app + def test_something_quick(): + pass .. versionadded:: 2.2 -You can restrict a test run only tests marked with ``webtest`` like this:: +You can then restrict a test run to only run tests marked with ``webtest``:: - $ py.test -m webtest + $ py.test -v -m webtest + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 -- /Users/hpk/venv/0/bin/python + collecting ... collected 2 items + + test_server.py:3: test_send_http PASSED + + ===================== 1 tests deselected by "-m 'webtest'" ===================== + ==================== 1 passed, 1 deselected in 0.01 seconds ==================== Or the inverse, running all tests except the webtest ones:: - $ py.test -m "not webtest" + $ py.test -v -m "not webtest" + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 -- /Users/hpk/venv/0/bin/python + collecting ... collected 2 items + + test_server.py:6: test_something_quick PASSED + + =================== 1 tests deselected by "-m 'not webtest'" =================== + ==================== 1 passed, 1 deselected in 0.01 seconds ==================== Registering markers ------------------------------------- @@ -53,9 +71,19 @@ markers = webtest: mark a test as a webtest. -You can ask which markers exist for your test suite:: +You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` markers:: $ py.test --markers + @pytest.mark.webtest: mark a test as a webtest. + + @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. + + @pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied. + + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. + + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. + For an example on how to add and work markers from a plugin, see :ref:`adding a custom marker from a plugin`. @@ -118,39 +146,42 @@ Using ``-k TEXT`` to select tests ---------------------------------------------------- -You can use the ``-k`` command line option to select tests:: +You can use the ``-k`` command line option to only run tests with names that match the given argument:: - $ py.test -k webtest # running with the above defined examples yields - =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + $ py.test -k send_http # running with the above defined examples + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 collecting ... collected 4 items - test_mark.py .. - test_mark_classlevel.py .. + test_server.py . 
- ========================= 4 passed in 0.03 seconds ========================= + ===================== 3 tests deselected by '-ksend_http' ====================== + ==================== 1 passed, 3 deselected in 0.02 seconds ==================== And you can also run all tests except the ones that match the keyword:: - $ py.test -k-webtest - =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + $ py.test -k-send_http + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 collecting ... collected 4 items - ===================== 4 tests deselected by '-webtest' ===================== - ======================= 4 deselected in 0.02 seconds ======================= + test_mark_classlevel.py .. + test_server.py . + + ===================== 1 tests deselected by '-k-send_http' ===================== + ==================== 3 passed, 1 deselected in 0.03 seconds ==================== Or to only select the class:: $ py.test -kTestClass - =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 collecting ... collected 4 items test_mark_classlevel.py .. - ==================== 2 tests deselected by 'TestClass' ===================== - ================== 2 passed, 2 deselected in 0.02 seconds ================== + ===================== 2 tests deselected by '-kTestClass' ====================== + ==================== 2 passed, 2 deselected in 0.02 seconds ==================== API reference for mark related objects ------------------------------------------------ diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev6', + version='2.2.0.dev7', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b testing/test_mark.py --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -114,6 +114,30 @@ "*unregisteredmark*not*registered*", ]) + at pytest.mark.multi(spec=[ + ("xyz", ("test_one",)), + ("xyz and xyz2", ()), + ("xyz2", ("test_two",)), + ("xyz or xyz2", ("test_one", "test_two"),) +]) +def test_mark_option(spec, testdir): + testdir.makepyfile(""" + import pytest + @pytest.mark.xyz + def test_one(): + pass + @pytest.mark.xyz2 + def test_two(): + pass + """) + opt, passed_result = spec + rec = testdir.inline_run("-m", opt) + passed, skipped, fail = rec.listoutcomes() + passed = [x.nodeid.split("::")[-1] for x in passed] + assert len(passed) == len(passed_result) + assert list(passed) == list(passed_result) + + class TestFunctional: def test_mark_per_function(self, testdir): Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
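
One detail worth spelling out from the changeset above: the new "-m" option evaluates its argument as a plain Python expression in which every name means "is a marker of that name set on the test function", which is where the predictable and/or semantics come from. A rough standalone illustration of the same lookup trick (BoolDict is copied from the diff; the test function and the "slow" name are made up)::

    import pytest

    @pytest.mark.webtest
    def test_send_http():
        pass

    class BoolDict:
        def __init__(self, mydict):
            self._mydict = mydict
        def __getitem__(self, name):
            return name in self._mydict

    attrs = BoolDict(test_send_http.__dict__)
    assert eval("webtest", {}, attrs)                # py.test -m webtest
    assert not eval("not webtest", {}, attrs)        # py.test -m "not webtest"
    assert eval("webtest or slow", {}, attrs)        # unknown names are False
    assert not eval("webtest and slow", {}, attrs)
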
From commits-noreply at bitbucket.org Sat Nov 12 00:20:58 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 11 Nov 2011 23:20:58 -0000 Subject: [py-svn] commit/pytest: hpk42: fix test Message-ID: <20111111232058.384.35892@bitbucket03.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/e97f2a87fefa/ changeset: e97f2a87fefa user: hpk42 date: 2011-11-12 00:18:33 summary: fix test affected #: 1 file diff -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b -r e97f2a87fefa4a7280a3a7164b99b2a4d1c6e840 testing/test_terminal.py --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -340,7 +340,7 @@ result = testdir.runpytest("-k", "test_two:", testpath) result.stdout.fnmatch_lines([ "*test_deselected.py ..", - "=* 1 test*deselected by 'test_two:'*=", + "=* 1 test*deselected by*test_two:*=", ]) assert result.ret == 0 Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Sat Nov 12 16:16:30 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Sat, 12 Nov 2011 15:16:30 -0000 Subject: [py-svn] commit/pytest: hpk42: fix issue50 (add a reference to the already implemented -m) and improve release annoucnement and changelog. Message-ID: <20111112151630.30004.77205@bitbucket12.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/9f8512dc3ee8/ changeset: 9f8512dc3ee8 user: hpk42 date: 2011-11-12 16:10:12 summary: fix issue50 (add a reference to the already implemented -m) and improve release annoucnement and changelog. affected #: 2 files diff -r e97f2a87fefa4a7280a3a7164b99b2a4d1c6e840 -r 9f8512dc3ee8f76491d39f3283642e4833c6e99c CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -7,10 +7,10 @@ allowing to avoid typos and maintain a well described set of markers for your test suite. See exaples at http://pytest.org/latest/mark.html and its links. -- introduce "-m marker" option to select tests based on markers - (this is a stricter and more predictable version of '-k' in that - "-m" only matches complete markers and has more obvious rules - for and/or semantics. +- issue50: introduce "-m marker" option to select tests based on markers + (this is a stricter and more predictable version of '-k' in that "-m" + only matches complete markers and has more obvious rules for and/or + semantics. - new feature to help optimizing the speed of your tests: --durations=N option for displaying N slowest test calls and setup/teardown methods. diff -r e97f2a87fefa4a7280a3a7164b99b2a4d1c6e840 -r 9f8512dc3ee8f76491d39f3283642e4833c6e99c doc/announce/release-2.2.0.txt --- a/doc/announce/release-2.2.0.txt +++ b/doc/announce/release-2.2.0.txt @@ -1,11 +1,22 @@ -py.test 2.2.0: new test duration profiling and bug fixes +py.test 2.2.0: improved test markers and duration profiling =========================================================================== -pytest-2.2.0 is a quite backward compatible release of the popular -py.test testing tool. It introduces the new "--duration=N" option -showing the N slowest test execution or setup/teardown calls. The -release also contains a few fixes and some cleanup of pytest's own test -suite allowing it to run on a wider range of environments. +pytest-2.2.0 is a quite (*) backward compatible release of the popular +py.test testing tool. 
It includes the following new features: + +* new "--duration=N" option showing the N slowest test execution + or setup/teardown calls. + +* new "-m markexpr" option for selecting tests according to their mark + +* new ini-variable for registering test markers and a "--strict" + option that will error out if you are using unregistered markers + +Usages of the improved marking mechanism is illustrated by a couple +of initial examples, see XXX + +Besides there is the usual set of bug fixes along with a cleanup of +pytest's own test suite allowing it to run on a wider range of environments. For general information, see extensive docs with examples here: @@ -16,19 +27,22 @@ pip install -U pytest # or easy_install -U pytest -incompatible change ------------------------------------- + +(*) incompatible changes: * You need a new version of the pytest-xdist plugin (1.7) for distributing test runs. -* Also other plugins might need an upgrade if they implement +* Other plugins might need an upgrade if they implement the ``pytest_runtest_logreport`` hook which now is called unconditionally for the setup/teardown fixture phases of a test. You can just choose to ignore them by inserting "if rep.when != 'call': return". Note that most code probably "just" works because the hook was already called for failing setup/teardown phases of a test. +Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner XXX for their +help and feedback on various issues. + best, holger krekel Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Tue Nov 15 14:28:44 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 15 Nov 2011 13:28:44 -0000 Subject: [py-svn] commit/pytest: hpk42: fix issue89 apply Daniel Nouri's patch to doctest/--pdb interaction. Message-ID: <20111115132844.7391.6342@bitbucket03.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/f295bff17f6e/ changeset: f295bff17f6e user: hpk42 date: 2011-11-15 14:28:22 summary: fix issue89 apply Daniel Nouri's patch to doctest/--pdb interaction. affected #: 5 files diff -r 9f8512dc3ee8f76491d39f3283642e4833c6e99c -r f295bff17f6efdeb00799132bf378b7ec4af8452 AUTHORS --- a/AUTHORS +++ b/AUTHORS @@ -22,3 +22,4 @@ Grig Gheorghiu Bob Ippolito Christian Tismer +Daniel Nuri diff -r 9f8512dc3ee8f76491d39f3283642e4833c6e99c -r f295bff17f6efdeb00799132bf378b7ec4af8452 CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -14,6 +14,7 @@ - new feature to help optimizing the speed of your tests: --durations=N option for displaying N slowest test calls and setup/teardown methods. +- fix issue89: --pdb with unexpected exceptions in doctest work more sensibly - fix and cleanup pytest's own test suite to not leak FDs - fix issue83: link to generated funcarg list - fix issue74: pyarg module names are now checked against imp.find_module false positives diff -r 9f8512dc3ee8f76491d39f3283642e4833c6e99c -r f295bff17f6efdeb00799132bf378b7ec4af8452 _pytest/pdb.py --- a/_pytest/pdb.py +++ b/_pytest/pdb.py @@ -70,7 +70,13 @@ tw.sep(">", "traceback") rep.toterminal(tw) tw.sep(">", "entering PDB") - post_mortem(call.excinfo._excinfo[2]) + # A doctest.UnexpectedException is not useful for post_mortem. 
+ # Use the underlying exception instead: + if isinstance(call.excinfo.value, py.std.doctest.UnexpectedException): + tb = call.excinfo.value.exc_info[2] + else: + tb = call.excinfo._excinfo[2] + post_mortem(tb) rep._pdbshown = True return rep diff -r 9f8512dc3ee8f76491d39f3283642e4833c6e99c -r f295bff17f6efdeb00799132bf378b7ec4af8452 doc/announce/release-2.2.0.txt --- a/doc/announce/release-2.2.0.txt +++ b/doc/announce/release-2.2.0.txt @@ -40,7 +40,7 @@ most code probably "just" works because the hook was already called for failing setup/teardown phases of a test. -Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner XXX for their +Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, XXX for their help and feedback on various issues. best, diff -r 9f8512dc3ee8f76491d39f3283642e4833c6e99c -r f295bff17f6efdeb00799132bf378b7ec4af8452 testing/test_pdb.py --- a/testing/test_pdb.py +++ b/testing/test_pdb.py @@ -106,6 +106,26 @@ if child.isalive(): child.wait() + def test_pdb_interaction_doctest(self, testdir): + p1 = testdir.makepyfile(""" + import pytest + def function_1(): + ''' + >>> i = 0 + >>> assert i == 1 + ''' + """) + child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1) + child.expect("(Pdb)") + child.sendline('i') + child.expect("0") + child.expect("(Pdb)") + child.sendeof() + rest = child.read() + assert "1 failed" in rest + if child.isalive(): + child.wait() + def test_pdb_interaction_capturing_twice(self, testdir): p1 = testdir.makepyfile(""" import pytest Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Tue Nov 15 14:35:06 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 15 Nov 2011 13:35:06 -0000 Subject: [py-svn] commit/pytest: RonnyPfannschmidt: test and fix pastebin xmlrpc import name missmatch, fixes #87 Message-ID: <20111115133506.10755.57486@bitbucket02.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/6ecdb48a6121/ changeset: 6ecdb48a6121 user: RonnyPfannschmidt date: 2011-11-14 17:51:12 summary: test and fix pastebin xmlrpc import name missmatch, fixes #87 affected #: 2 files diff -r 9f8512dc3ee8f76491d39f3283642e4833c6e99c -r 6ecdb48a61216dc23007c27750c7e35a8bcfc9f4 _pytest/pastebin.py --- a/_pytest/pastebin.py +++ b/_pytest/pastebin.py @@ -38,7 +38,11 @@ del tr._tw.__dict__['write'] def getproxy(): - return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes + if sys.version_info < (3, 0): + from xmlrpclib import ServerProxy + else: + from xmlrpc.client import ServerProxy + return ServerProxy(url.xmlrpc).pastes def pytest_terminal_summary(terminalreporter): if terminalreporter.config.option.pastebin != "failed": diff -r 9f8512dc3ee8f76491d39f3283642e4833c6e99c -r 6ecdb48a61216dc23007c27750c7e35a8bcfc9f4 testing/test_pastebin.py --- a/testing/test_pastebin.py +++ b/testing/test_pastebin.py @@ -1,3 +1,4 @@ +import pytest class TestPasting: def pytest_funcarg__pastebinlist(self, request): @@ -45,3 +46,14 @@ for x in 'test_fail test_skip skipped'.split(): assert s.find(x), (s, x) + +class TestRPCClient: + def pytest_funcarg__pastebin(self, request): + return request.config.pluginmanager.getplugin('pastebin') + + def test_getproxy(self, pastebin): + proxy = pastebin.getproxy() + assert proxy is not None + assert proxy.__class__.__module__.startswith('xmlrpc') + + Repository URL: 
https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Tue Nov 15 14:36:11 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Tue, 15 Nov 2011 13:36:11 -0000 Subject: [py-svn] commit/pytest: hpk42: add changelog entry for issue87 Message-ID: <20111115133611.5460.16380@bitbucket13.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/179db3bcfdb5/ changeset: 179db3bcfdb5 user: hpk42 date: 2011-11-15 14:36:02 summary: add changelog entry for issue87 affected #: 1 file diff -r 4fe5f94468c40aca326d301d2a23f0c86ed80a71 -r 179db3bcfdb557cea10779712d508762b91224fb CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -14,6 +14,7 @@ - new feature to help optimizing the speed of your tests: --durations=N option for displaying N slowest test calls and setup/teardown methods. +- fix issue87: --pastebin now works with python3 - fix issue89: --pdb with unexpected exceptions in doctest work more sensibly - fix and cleanup pytest's own test suite to not leak FDs - fix issue83: link to generated funcarg list Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Thu Nov 17 12:11:49 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Thu, 17 Nov 2011 11:11:49 -0000 Subject: [py-svn] commit/pytest: hpk42: introduce metafunc.parametrize() and @pytest.mark.parametrize with examples. deprecate metafunc.addcall() Message-ID: <20111117111149.11611.30467@bitbucket02.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/90eef85d16b1/ changeset: 90eef85d16b1 user: hpk42 date: 2011-11-17 12:09:21 summary: introduce metafunc.parametrize() and @pytest.mark.parametrize with examples. deprecate metafunc.addcall() affected #: 11 files diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,13 @@ Changes between 2.1.3 and XXX 2.2.0 ---------------------------------------- +- add an all-powerful metafunc.parametrize function which allows to + parametrize test function arguments in multiple steps and therefore + from indepdenent plugins and palces. +- add a @pytest.mark.parametrize helper which allows to easily + call a test function with different argument values +- Add examples to the "parametrize" example page, including a quick port + of Test scenarios and the new parametrize function and decorator. - introduce registration for "pytest.mark.*" helpers via ini-files or through plugin hooks. 
Also introduce a "--strict" option which will treat unregistered markers as errors diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev7' +__version__ = '2.2.0.dev8' diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa _pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -4,6 +4,7 @@ import sys import pytest from py._code.code import TerminalRepr +from _pytest.monkeypatch import monkeypatch import _pytest cutdir = py.path.local(_pytest.__file__).dirpath() @@ -26,6 +27,23 @@ showfuncargs(config) return 0 + +def pytest_generate_tests(metafunc): + try: + param = metafunc.function.parametrize + except AttributeError: + return + metafunc.parametrize(*param.args, **param.kwargs) + +def pytest_configure(config): + config.addinivalue_line("markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in multiple different argument value sets. Example: " + "@parametrize(arg1, [1,2]) would lead to two calls of the decorated " + "test function, one with arg1=1 and another with arg1=2." + ) + + @pytest.mark.trylast def pytest_namespace(): raises.Exception = pytest.fail.Exception @@ -425,6 +443,7 @@ "yielded functions (deprecated) cannot have funcargs") else: if callspec is not None: + self.callspec = callspec self.funcargs = callspec.funcargs or {} self._genid = callspec.id if hasattr(callspec, "param"): @@ -501,15 +520,59 @@ request._fillfuncargs() _notexists = object() -class CallSpec: - def __init__(self, funcargs, id, param): - self.funcargs = funcargs - self.id = id + +class CallSpec2(object): + def __init__(self, metafunc): + self.metafunc = metafunc + self.funcargs = {} + self._idlist = [] + self.params = {} + self._globalid = _notexists + self._globalid_args = set() + self._globalparam = _notexists + + def copy(self, metafunc): + cs = CallSpec2(self.metafunc) + cs.funcargs.update(self.funcargs) + cs.params.update(self.params) + cs._idlist = list(self._idlist) + cs._globalid = self._globalid + cs._globalid_args = self._globalid_args + cs._globalparam = self._globalparam + return cs + + def _checkargnotcontained(self, arg): + if arg in self.params or arg in self.funcargs: + raise ValueError("duplicate %r" %(arg,)) + + def getparam(self, name): + try: + return self.params[name] + except KeyError: + if self._globalparam is _notexists: + raise ValueError(name) + return self._globalparam + + @property + def id(self): + return "-".join(filter(None, self._idlist)) + + def setmulti(self, valtype, argnames, valset, id): + for arg,val in zip(argnames, valset): + self._checkargnotcontained(arg) + getattr(self, valtype)[arg] = val + self._idlist.append(id) + + def setall(self, funcargs, id, param): + for x in funcargs: + self._checkargnotcontained(x) + self.funcargs.update(funcargs) + if id is not _notexists: + self._idlist.append(id) if param is not _notexists: - self.param = param - def __repr__(self): - return "" %( - self.id, getattr(self, 'param', '?'), self.funcargs) + assert self._globalparam is _notexists + self._globalparam = param + class Metafunc: def __init__(self, function, config=None, cls=None, module=None): @@ -523,31 +586,69 @@ self._calls = [] self._ids = py.builtin.set() + def parametrize(self, argnames, argvalues, indirect=False, ids=None): + """ parametrize calls to the underlying test function during + the collection phase of a test run. 
parametrize may be called + multiple times for disjunct argnames sets. + + :arg argnames: an argument name or a list of argument names + + :arg argvalues: a list of values for a single argument if argnames + specified a single argument only or a list of tuples which specify + values for the multiple argument names. + + :arg indirect: if True each argvalue corresponding to an argument will be + passed as request.param to the respective funcarg factory so that + it can perform more expensive setups during the setup phase of + a test rather than at collection time (which is the default). + + :arg ids: list of string ids corresponding to the (list of) argvalues + so that they are part of the test id. If no ids are provided + they will be generated automatically from the argvalues. + """ + if not isinstance(argnames, (tuple, list)): + argnames = (argnames,) + argvalues = [(val,) for val in argvalues] + for arg in argnames: + if arg not in self.funcargnames: + raise ValueError("%r has no argument %r" %(self.function, arg)) + valtype = indirect and "params" or "funcargs" + if not ids: + idmaker = IDMaker() + ids = list(map(idmaker, argvalues)) + newcalls = [] + for callspec in self._calls or [CallSpec2(self)]: + for i, valset in enumerate(argvalues): + assert len(valset) == len(argnames) + newcallspec = callspec.copy(self) + newcallspec.setmulti(valtype, argnames, valset, ids[i]) + newcalls.append(newcallspec) + self._calls = newcalls + def addcall(self, funcargs=None, id=_notexists, param=_notexists): - """ add a new call to the underlying test function during the - collection phase of a test run. Note that request.addcall() is + """ (deprecated, use parametrize) add a new call to the underlying + test function during + the collection phase of a test run. Note that request.addcall() is called during the test collection phase prior and independently - to actual test execution. Therefore you should perform setup - of resources in a funcarg factory which can be instrumented - with the ``param``. + to actual test execution. You should only use addcall() + if you need to specify multiple arguments of a test function :arg funcargs: argument keyword dictionary used when invoking the test function. :arg id: used for reporting and identification purposes. If you - don't supply an `id` the length of the currently - list of calls to the test function will be used. + don't supply an `id` an automatic unique id will be generated. - :arg param: will be exposed to a later funcarg factory invocation - through the ``request.param`` attribute. It allows to - defer test fixture setup activities to when an actual - test is run. + :arg param: a parameter which will be exposed to a later funcarg factory + invocation through the ``request.param`` attribute. """ assert funcargs is None or isinstance(funcargs, dict) if funcargs is not None: for name in funcargs: if name not in self.funcargnames: pytest.fail("funcarg %r not used in this function." 
% name) + else: + funcargs = {} if id is None: raise ValueError("id=None not allowed") if id is _notexists: @@ -556,11 +657,26 @@ if id in self._ids: raise ValueError("duplicate id %r" % id) self._ids.add(id) - self._calls.append(CallSpec(funcargs, id, param)) + + cs = CallSpec2(self) + cs.setall(funcargs, id, param) + self._calls.append(cs) + +class IDMaker: + def __init__(self): + self.counter = 0 + def __call__(self, valset): + l = [] + for val in valset: + if not isinstance(val, (int, str)): + val = "."+str(self.counter) + self.counter += 1 + l.append(str(val)) + return "-".join(l) class FuncargRequest: """ A request for function arguments from a test function. - + Note that there is an optional ``param`` attribute in case there was an invocation to metafunc.addcall(param=...). If no such call was done in a ``pytest_generate_tests`` @@ -693,11 +809,18 @@ self._raiselookupfailed(argname) funcargfactory = self._name2factory[argname].pop() oldarg = self._currentarg - self._currentarg = argname + mp = monkeypatch() + mp.setattr(self, '_currentarg', argname) + try: + param = self._pyfuncitem.callspec.getparam(argname) + except (AttributeError, ValueError): + pass + else: + mp.setattr(self, 'param', param, raising=False) try: self._funcargs[argname] = res = funcargfactory(request=self) finally: - self._currentarg = oldarg + mp.undo() return res def _getscopeitem(self, scope): diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa doc/announce/release-2.2.0.txt --- a/doc/announce/release-2.2.0.txt +++ b/doc/announce/release-2.2.0.txt @@ -1,19 +1,25 @@ py.test 2.2.0: improved test markers and duration profiling =========================================================================== -pytest-2.2.0 is a quite (*) backward compatible release of the popular -py.test testing tool. It includes the following new features: +pytest-2.2.0 is a quite [1] backward compatible release of the popular +py.test testing tool. There are a couple of new features: -* new "--duration=N" option showing the N slowest test execution +* "--duration=N" option showing the N slowest test execution or setup/teardown calls. -* new "-m markexpr" option for selecting tests according to their mark +* @pytest.mark.parametrize decorator for runnin test functions + with multiple values and a new more powerful metafunc.parametrize() + helper to be used from pytest_generate_tests. Multiple parametrize + functions can now be invoked for the same test function. -* new ini-variable for registering test markers and a "--strict" - option that will error out if you are using unregistered markers +* "-m markexpr" option for selecting tests according to their mark and + a new "markers" ini-variable for registering test markers. The new "--strict" + option will bail out with an error if you are using unregistered markers. +Usage of improved parametrize is documented in examples at +http://pytest.org/latest/example/parametrize.html Usages of the improved marking mechanism is illustrated by a couple -of initial examples, see XXX +of initial examples, see http://pytest.org/latest/example/markers.html Besides there is the usual set of bug fixes along with a cleanup of pytest's own test suite allowing it to run on a wider range of environments. @@ -27,8 +33,15 @@ pip install -U pytest # or easy_install -U pytest +Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, XXX for their +help and feedback on various issues. 
-(*) incompatible changes: +best, +holger krekel + + +[1] notes on incompatibility +------------------------------ * You need a new version of the pytest-xdist plugin (1.7) for distributing test runs. @@ -40,9 +53,3 @@ most code probably "just" works because the hook was already called for failing setup/teardown phases of a test. -Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, XXX for their -help and feedback on various issues. - -best, -holger krekel - diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa doc/example/multipython.py --- a/doc/example/multipython.py +++ b/doc/example/multipython.py @@ -7,13 +7,11 @@ pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8'] def pytest_generate_tests(metafunc): - if 'python1' in metafunc.funcargnames: - assert 'python2' in metafunc.funcargnames - for obj in metafunc.function.multiarg.kwargs['obj']: - for py1 in pythonlist: - for py2 in pythonlist: - metafunc.addcall(id="%s-%s-%s" % (py1, py2, obj), - param=(py1, py2, obj)) + for arg in metafunc.funcargnames: + if arg.startswith("python"): + metafunc.parametrize(arg, pythonlist, indirect=True) + elif arg == "obj": + metafunc.parametrize("obj", metafunc.function.multiarg.kwargs['obj']) @py.test.mark.multiarg(obj=[42, {}, {1:3},]) def test_basic_objects(python1, python2, obj): @@ -23,14 +21,11 @@ def pytest_funcarg__python1(request): tmpdir = request.getfuncargvalue("tmpdir") picklefile = tmpdir.join("data.pickle") - return Python(request.param[0], picklefile) + return Python(request.param, picklefile) def pytest_funcarg__python2(request): python1 = request.getfuncargvalue("python1") - return Python(request.param[1], python1.picklefile) - -def pytest_funcarg__obj(request): - return request.param[2] + return Python(request.param, python1.picklefile) class Python: def __init__(self, version, picklefile): diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa doc/example/parametrize.txt --- a/doc/example/parametrize.txt +++ b/doc/example/parametrize.txt @@ -4,18 +4,69 @@ Parametrizing tests ================================================= -py.test allows to easily implement your own custom -parametrization scheme for tests. Here we provide -some examples for inspiration and re-use. +.. currentmodule:: _pytest.python + +py.test allows to easily parametrize test functions. +In the following we provide some examples using +the builtin mechanisms. + +.. _parametrizemark: + +simple "decorator" parametrization of a test function +---------------------------------------------------------------------------- + +.. versionadded:: 2.2 + +The builtin ``parametrize`` marker allows you to easily write generic +test functions that will be invoked with multiple input/output values:: + + # content of test_expectation.py + import pytest + @pytest.mark.parametrize(("input", "expected"), [ + ("3+5", 8), + ("2+4", 6), + ("6*9", 42), + ]) + def test_eval(input, expected): + assert eval(input) == expected + +Here we parametrize two arguments of the test function so that the test +function is called three times. Let's run it:: + + $ py.test -q + collecting ... 
collected 3 items + ..F + =================================== FAILURES =================================== + ______________________________ test_eval[6*9-42] _______________________________ + + input = '6*9', expected = 42 + + @pytest.mark.parametrize(("input", "expected"), [ + ("3+5", 8), + ("2+4", 6), + ("6*9", 42), + ]) + def test_eval(input, expected): + > assert eval(input) == expected + E assert 54 == 42 + E + where 54 = eval('6*9') + + test_expectation.py:9: AssertionError + 1 failed, 2 passed in 0.03 seconds + +As expected only one pair of input/output values fails the simple test function. + +Note that there are various ways how you can mark groups of functions, +see :ref:`mark`. Generating parameters combinations, depending on command line ---------------------------------------------------------------------------- .. regendoc:wipe -Let's say we want to execute a test with different parameters -and the parameter range shall be determined by a command -line argument. Let's first write a simple computation test:: +Let's say we want to execute a test with different computation +parameters and the parameter range shall be determined by a command +line argument. Let's first write a simple (do-nothing) computation test:: # content of test_compute.py @@ -36,8 +87,7 @@ end = 5 else: end = 2 - for i in range(end): - metafunc.addcall(funcargs={'param1': i}) + metafunc.parametrize("param1", range(end)) This means that we only run 2 tests if we do not pass ``--all``:: @@ -52,8 +102,8 @@ $ py.test -q --all collecting ... collected 5 items ....F - ================================= FAILURES ================================= - _____________________________ test_compute[4] ______________________________ + =================================== FAILURES =================================== + _______________________________ test_compute[4] ________________________________ param1 = 4 @@ -67,15 +117,73 @@ As expected when running the full range of ``param1`` values we'll get an error on the last one. -Deferring the setup of parametrizing resources +a quick port of "testscenarios" +------------------------------------ + +.. _`test scenarios`: http://bazaar.launchpad.net/~lifeless/testscenarios/trunk/annotate/head%3A/doc/example.py + +Here is a quick port of to run tests configured with `test scenarios`_, +an add-on from Robert Collins for the standard unittest framework. We +only have to work a bit to construct the correct arguments for pytest's +:py:func:`Metafunc.parametrize`:: + + # content of test_scenarios.py + + def pytest_generate_tests(metafunc): + idlist = [] + argvalues = [] + for scenario in metafunc.cls.scenarios: + idlist.append(scenario[0]) + items = scenario[1].items() + argnames = [x[0] for x in items] + argvalues.append(([x[1] for x in items])) + metafunc.parametrize(argnames, argvalues, ids=idlist) + + scenario1 = ('basic', {'attribute': 'value'}) + scenario2 = ('advanced', {'attribute': 'value2'}) + + class TestSampleWithScenarios: + scenarios = [scenario1, scenario2] + + def test_demo(self, attribute): + assert isinstance(attribute, str) + +this is a fully self-contained example which you can run with:: + + $ py.test test_scenarios.py + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 + collecting ... collected 2 items + + test_scenarios.py .. 
+ + =========================== 2 passed in 0.02 seconds =========================== + +If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: + + + $ py.test --collectonly test_scenarios.py + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 + collecting ... collected 2 items + + + + + + + =============================== in 0.01 seconds =============================== + +Deferring the setup of parametrized resources --------------------------------------------------- .. regendoc:wipe The parametrization of test functions happens at collection -time. It is often a good idea to setup possibly expensive -resources only when the actual test is run. Here is a simple -example how you can achieve that:: +time. It is a good idea to setup expensive resources like DB +connections or subprocess only when the actual test is run. +Here is a simple example how you can achieve that, first +the actual test requiring a ``db`` object:: # content of test_backends.py @@ -85,17 +193,15 @@ if db.__class__.__name__ == "DB2": pytest.fail("deliberately failing for demo purposes") -Now we add a test configuration that takes care to generate -two invocations of the ``test_db_initialized`` function and -furthermore a factory that creates a database object when -each test is actually run:: +We can now add a test configuration that generates two invocations of +the ``test_db_initialized`` function and also implements a factory that +creates a database object for the actual test invocations:: # content of conftest.py def pytest_generate_tests(metafunc): if 'db' in metafunc.funcargnames: - metafunc.addcall(param="d1") - metafunc.addcall(param="d2") + metafunc.parametrize("db", ['d1', 'd2'], indirect=True) class DB1: "one database object" @@ -113,24 +219,24 @@ Let's first see how it looks like at collection time:: $ py.test test_backends.py --collectonly - =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 collecting ... collected 2 items - - + + - ============================= in 0.01 seconds ============================= + =============================== in 0.01 seconds =============================== And then when we run the test:: $ py.test -q test_backends.py collecting ... collected 2 items .F - ================================= FAILURES ================================= - __________________________ test_db_initialized[1] __________________________ + =================================== FAILURES =================================== + ___________________________ test_db_initialized[d2] ____________________________ - db = + db = def test_db_initialized(db): # a dummy test @@ -141,32 +247,35 @@ test_backends.py:6: Failed 1 failed, 1 passed in 0.02 seconds -Now you see that one invocation of the test passes and another fails, -as it to be expected. +The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``pytest_funcarg__db`` factory has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase. + +.. 
regendoc:wipe Parametrizing test methods through per-class configuration -------------------------------------------------------------- .. _`unittest parameterizer`: http://code.google.com/p/unittest-ext/source/browse/trunk/params.py + Here is an example ``pytest_generate_function`` function implementing a parametrization scheme similar to Michael Foords `unittest -parameterizer`_ in a lot less code:: +parameterizer`_ but in a lot less code:: # content of ./test_parametrize.py import pytest def pytest_generate_tests(metafunc): # called once per each test function - for funcargs in metafunc.cls.params[metafunc.function.__name__]: - # schedule a new test function run with applied **funcargs - metafunc.addcall(funcargs=funcargs) + funcarglist = metafunc.cls.params[metafunc.function.__name__] + argnames = list(funcarglist[0]) + metafunc.parametrize(argnames, [[funcargs[name] for name in argnames] + for funcargs in funcarglist]) class TestClass: # a map specifying multiple argument sets for a test method params = { 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ], - 'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)], + 'test_zerodivision': [dict(a=1, b=0), ], } def test_equals(self, a, b): @@ -175,114 +284,35 @@ def test_zerodivision(self, a, b): pytest.raises(ZeroDivisionError, "a/b") -Running it means we are two tests for each test functions, using -the respective settings:: +Our test generator looks up a class-level definition which specifies which +argument sets to use for each test function. Let's run it:: $ py.test -q - collecting ... collected 6 items - .FF..F - ================================= FAILURES ================================= - __________________________ test_db_initialized[1] __________________________ + collecting ... collected 3 items + F.. 
+ =================================== FAILURES =================================== + __________________________ TestClass.test_equals[1-2] __________________________ - db = - - def test_db_initialized(db): - # a dummy test - if db.__class__.__name__ == "DB2": - > pytest.fail("deliberately failing for demo purposes") - E Failed: deliberately failing for demo purposes - - test_backends.py:6: Failed - _________________________ TestClass.test_equals[0] _________________________ - - self = , a = 1, b = 2 + self = , a = 1, b = 2 def test_equals(self, a, b): > assert a == b E assert 1 == 2 - test_parametrize.py:17: AssertionError - ______________________ TestClass.test_zerodivision[1] ______________________ - - self = , a = 3, b = 2 - - def test_zerodivision(self, a, b): - > pytest.raises(ZeroDivisionError, "a/b") - E Failed: DID NOT RAISE - - test_parametrize.py:20: Failed - 3 failed, 3 passed in 0.05 seconds - -Parametrizing test methods through a decorator --------------------------------------------------------------- - -Modifying the previous example we can also allow decorators -for parametrizing test methods:: - - # content of test_parametrize2.py - - import pytest - - # test support code - def params(funcarglist): - def wrapper(function): - function.funcarglist = funcarglist - return function - return wrapper - - def pytest_generate_tests(metafunc): - for funcargs in getattr(metafunc.function, 'funcarglist', ()): - metafunc.addcall(funcargs=funcargs) - - # actual test code - class TestClass: - @params([dict(a=1, b=2), dict(a=3, b=3), ]) - def test_equals(self, a, b): - assert a == b - - @params([dict(a=1, b=0), dict(a=3, b=2)]) - def test_zerodivision(self, a, b): - pytest.raises(ZeroDivisionError, "a/b") - -Running it gives similar results as before:: - - $ py.test -q test_parametrize2.py - collecting ... collected 4 items - F..F - ================================= FAILURES ================================= - _________________________ TestClass.test_equals[0] _________________________ - - self = , a = 1, b = 2 - - @params([dict(a=1, b=2), dict(a=3, b=3), ]) - def test_equals(self, a, b): - > assert a == b - E assert 1 == 2 - - test_parametrize2.py:19: AssertionError - ______________________ TestClass.test_zerodivision[1] ______________________ - - self = , a = 3, b = 2 - - @params([dict(a=1, b=0), dict(a=3, b=2)]) - def test_zerodivision(self, a, b): - > pytest.raises(ZeroDivisionError, "a/b") - E Failed: DID NOT RAISE - - test_parametrize2.py:23: Failed - 2 failed, 2 passed in 0.04 seconds + test_parametrize.py:18: AssertionError + 1 failed, 2 passed in 0.03 seconds Checking serialization between Python interpreters -------------------------------------------------------------- Here is a stripped down real-life example of using parametrized -testing for testing serialization between different interpreters. +testing for testing serialization, invoking different python interpreters. We define a ``test_basic_objects`` function which is to be run with different sets of arguments for its three arguments:: -* ``python1``: first python interpreter -* ``python2``: second python interpreter -* ``obj``: object to be dumped from first interpreter and loaded into second interpreter +* ``python1``: first python interpreter, run to pickle-dump an object to a file +* ``python2``: second interpreter, run to pickle-load an object from a file +* ``obj``: object to be dumped/loaded .. literalinclude:: multipython.py @@ -290,5 +320,5 @@ . $ py.test -q multipython.py collecting ... 
collected 75 items - ssssss...ss...ss...ssssssssssss...ss...ss...ssssssssssss...ss...ss...ssssss - 27 passed, 48 skipped in 3.04 seconds + ssssssssssssssssss.........ssssss.........ssssss.........ssssssssssssssssss + 27 passed, 48 skipped in 4.87 seconds diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa doc/funcargs.txt --- a/doc/funcargs.txt +++ b/doc/funcargs.txt @@ -61,14 +61,14 @@ Running the test looks like this:: $ py.test test_simplefactory.py - =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 collecting ... collected 1 items test_simplefactory.py F - ================================= FAILURES ================================= - ______________________________ test_function _______________________________ + =================================== FAILURES =================================== + ________________________________ test_function _________________________________ myfuncarg = 42 @@ -77,7 +77,7 @@ E assert 42 == 17 test_simplefactory.py:5: AssertionError - ========================= 1 failed in 0.02 seconds ========================= + =========================== 1 failed in 0.02 seconds =========================== This means that indeed the test function was called with a ``myfuncarg`` argument value of ``42`` and the assert fails. Here is how py.test @@ -158,23 +158,22 @@ # content of test_example.py def pytest_generate_tests(metafunc): if "numiter" in metafunc.funcargnames: - for i in range(10): - metafunc.addcall(funcargs=dict(numiter=i)) + metafunc.parametrize("numiter", range(10)) def test_func(numiter): assert numiter < 9 -Running this:: +Running this will generate ten invocations of ``test_func`` passing in each of the items in the list of ``range(10)``:: $ py.test test_example.py - =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 collecting ... collected 10 items test_example.py .........F - ================================= FAILURES ================================= - _______________________________ test_func[9] _______________________________ + =================================== FAILURES =================================== + _________________________________ test_func[9] _________________________________ numiter = 9 @@ -182,16 +181,16 @@ > assert numiter < 9 E assert 9 < 9 - test_example.py:7: AssertionError - ==================== 1 failed, 9 passed in 0.04 seconds ==================== + test_example.py:6: AssertionError + ====================== 1 failed, 9 passed in 0.07 seconds ====================== -Note that the ``pytest_generate_tests(metafunc)`` hook is called during +Obviously, only when ``numiter`` has the value of ``9`` does the test fail. Note that the ``pytest_generate_tests(metafunc)`` hook is called during the test collection phase which is separate from the actual test running. 
Let's just look at what is collected:: $ py.test --collectonly test_example.py - =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 collecting ... collected 10 items @@ -205,37 +204,19 @@ - ============================= in 0.01 seconds ============================= + =============================== in 0.01 seconds =============================== If you want to select only the run with the value ``7`` you could do:: $ py.test -v -k 7 test_example.py # or -k test_func[7] - =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 -- /Users/hpk/venv/0/bin/python + ============================= test session starts ============================== + platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 -- /Users/hpk/venv/1/bin/python collecting ... collected 10 items - test_example.py:6: test_func[0] PASSED - test_example.py:6: test_func[1] PASSED - test_example.py:6: test_func[2] PASSED - test_example.py:6: test_func[3] PASSED - test_example.py:6: test_func[4] PASSED - test_example.py:6: test_func[5] PASSED - test_example.py:6: test_func[6] PASSED - test_example.py:6: test_func[7] PASSED - test_example.py:6: test_func[8] PASSED - test_example.py:6: test_func[9] FAILED + test_example.py:5: test_func[7] PASSED - ================================= FAILURES ================================= - _______________________________ test_func[9] _______________________________ - - numiter = 9 - - def test_func(numiter): - > assert numiter < 9 - E assert 9 < 9 - - test_example.py:7: AssertionError - ==================== 1 failed, 9 passed in 0.05 seconds ==================== + ========================= 9 tests deselected by '-k7' ========================== + ==================== 1 passed, 9 deselected in 0.01 seconds ==================== You might want to look at :ref:`more parametrization examples `. @@ -259,4 +240,5 @@ ``metafunc.config``: access to command line opts and general config +.. automethod:: Metafunc.parametrize(name, values, idmaker=None) .. automethod:: Metafunc.addcall(funcargs=None, id=_notexists, param=_notexists) diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa doc/mark.txt --- a/doc/mark.txt +++ b/doc/mark.txt @@ -10,9 +10,11 @@ metadata on your test functions. To begin with, there are some builtin markers, for example: -* skipif - skip a test function if a certain condition is met -* xfail - produce an "expected failure" outcome if a certain +* :ref:`skipif ` - skip a test function if a certain condition is met +* :ref:`xfail ` - produce an "expected failure" outcome if a certain condition is met +* :ref:`parametrize ` to perform multiple calls + to the same test function. It's also easy to create custom markers or to apply markers to whole test classes or modules. diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa doc/talks.txt --- a/doc/talks.txt +++ b/doc/talks.txt @@ -23,7 +23,8 @@ Test parametrization: -- `generating parametrized tests with funcargs`_ +- `generating parametrized tests with funcargs`_ (uses deprecated + ``addcall()`` API. 
- `test generators and cached setup`_ - `parametrizing tests, generalized`_ (blog post) - `putting test-hooks into local or global plugins`_ (blog post) diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev7', + version='2.2.0.dev8', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r 179db3bcfdb557cea10779712d508762b91224fb -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa testing/test_python.py --- a/testing/test_python.py +++ b/testing/test_python.py @@ -520,12 +520,6 @@ if sys.version_info < (3,0): assert funcargs.getfuncargnames(A.f) == ['arg1'] -def test_callspec_repr(): - cs = funcargs.CallSpec({}, 'hello', 1) - repr(cs) - cs = funcargs.CallSpec({}, 'hello', funcargs._notexists) - repr(cs) - class TestFillFuncArgs: def test_fillfuncargs_exposed(self): # used by oejskit @@ -886,6 +880,7 @@ def function(): pass metafunc = funcargs.Metafunc(function) assert not metafunc.funcargnames + repr(metafunc._calls) def test_function_basic(self): def func(arg1, arg2="qwe"): pass @@ -925,9 +920,9 @@ metafunc.addcall(param=obj) metafunc.addcall(param=1) assert len(metafunc._calls) == 3 - assert metafunc._calls[0].param == obj - assert metafunc._calls[1].param == obj - assert metafunc._calls[2].param == 1 + assert metafunc._calls[0].getparam("arg1") == obj + assert metafunc._calls[1].getparam("arg1") == obj + assert metafunc._calls[2].getparam("arg1") == 1 def test_addcall_funcargs(self): def func(x): pass @@ -941,7 +936,119 @@ assert metafunc._calls[1].funcargs == {'x': 3} assert not hasattr(metafunc._calls[1], 'param') -class TestGenfuncFunctional: + def test_parametrize_error(self): + def func(x, y): pass + metafunc = funcargs.Metafunc(func) + metafunc.parametrize("x", [1,2]) + pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6])) + pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6])) + metafunc.parametrize("y", [1,2]) + pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6])) + pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6])) + + def test_parametrize_and_id(self): + def func(x, y): pass + metafunc = funcargs.Metafunc(func) + + metafunc.parametrize("x", [1,2], ids=['basic', 'advanced']) + metafunc.parametrize("y", ["abc", "def"]) + ids = [x.id for x in metafunc._calls] + assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"] + + def test_parametrize_with_userobjects(self): + def func(x, y): pass + metafunc = funcargs.Metafunc(func) + class A: + pass + metafunc.parametrize("x", [A(), A()]) + metafunc.parametrize("y", list("ab")) + assert metafunc._calls[0].id == ".0-a" + assert metafunc._calls[1].id == ".0-b" + assert metafunc._calls[2].id == ".1-a" + assert metafunc._calls[3].id == ".1-b" + + def test_addcall_and_parametrize(self): + def func(x, y): pass + metafunc = funcargs.Metafunc(func) + metafunc.addcall({'x': 1}) + metafunc.parametrize('y', [2,3]) + assert len(metafunc._calls) == 2 + assert metafunc._calls[0].funcargs == {'x': 1, 'y': 2} + assert metafunc._calls[1].funcargs == {'x': 1, 'y': 3} + assert metafunc._calls[0].id == "0-2" + assert metafunc._calls[1].id == "0-3" + + def test_parametrize_indirect(self): + def func(x, y): pass + metafunc = funcargs.Metafunc(func) + metafunc.parametrize('x', [1], indirect=True) + 
metafunc.parametrize('y', [2,3], indirect=True) + assert len(metafunc._calls) == 2 + assert metafunc._calls[0].funcargs == {} + assert metafunc._calls[1].funcargs == {} + assert metafunc._calls[0].params == dict(x=1,y=2) + assert metafunc._calls[1].params == dict(x=1,y=3) + + def test_addcalls_and_parametrize_indirect(self): + def func(x, y): pass + metafunc = funcargs.Metafunc(func) + metafunc.addcall(param="123") + metafunc.parametrize('x', [1], indirect=True) + metafunc.parametrize('y', [2,3], indirect=True) + assert len(metafunc._calls) == 2 + assert metafunc._calls[0].funcargs == {} + assert metafunc._calls[1].funcargs == {} + assert metafunc._calls[0].params == dict(x=1,y=2) + assert metafunc._calls[1].params == dict(x=1,y=3) + + def test_parametrize_functional(self, testdir): + testdir.makepyfile(""" + def pytest_generate_tests(metafunc): + metafunc.parametrize('x', [1,2], indirect=True) + metafunc.parametrize('y', [2]) + def pytest_funcarg__x(request): + return request.param * 10 + def pytest_funcarg__y(request): + return request.param + + def test_simple(x,y): + assert x in (10,20) + assert y == 2 + """) + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "*test_simple*1-2*", + "*test_simple*2-2*", + "*2 passed*", + ]) + + def test_parametrize_onearg(self): + metafunc = funcargs.Metafunc(lambda x: None) + metafunc.parametrize("x", [1,2]) + assert len(metafunc._calls) == 2 + assert metafunc._calls[0].funcargs == dict(x=1) + assert metafunc._calls[0].id == "1" + assert metafunc._calls[1].funcargs == dict(x=2) + assert metafunc._calls[1].id == "2" + + def test_parametrize_onearg_indirect(self): + metafunc = funcargs.Metafunc(lambda x: None) + metafunc.parametrize("x", [1,2], indirect=True) + assert metafunc._calls[0].params == dict(x=1) + assert metafunc._calls[0].id == "1" + assert metafunc._calls[1].params == dict(x=2) + assert metafunc._calls[1].id == "2" + + def test_parametrize_twoargs(self): + metafunc = funcargs.Metafunc(lambda x,y: None) + metafunc.parametrize(("x", "y"), [(1,2), (3,4)]) + assert len(metafunc._calls) == 2 + assert metafunc._calls[0].funcargs == dict(x=1, y=2) + assert metafunc._calls[0].id == "1-2" + assert metafunc._calls[1].funcargs == dict(x=3, y=4) + assert metafunc._calls[1].id == "3-4" + +class TestMetafuncFunctional: def test_attributes(self, testdir): p = testdir.makepyfile(""" # assumes that generate/provide runs in the same process @@ -1109,6 +1216,46 @@ "*1 pass*", ]) + def test_parametrize_functional2(self, testdir): + testdir.makepyfile(""" + def pytest_generate_tests(metafunc): + metafunc.parametrize("arg1", [1,2]) + metafunc.parametrize("arg2", [4,5]) + def test_hello(arg1, arg2): + assert 0, (arg1, arg2) + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "*(1, 4)*", + "*(1, 5)*", + "*(2, 4)*", + "*(2, 5)*", + "*4 failed*", + ]) + + def test_parametrize_and_inner_getfuncargvalue(self, testdir): + p = testdir.makepyfile(""" + def pytest_generate_tests(metafunc): + metafunc.parametrize("arg1", [1], indirect=True) + metafunc.parametrize("arg2", [10], indirect=True) + + def pytest_funcarg__arg1(request): + x = request.getfuncargvalue("arg2") + return x + request.param + + def pytest_funcarg__arg2(request): + return request.param + + def test_func1(arg1, arg2): + assert arg1 == 11 + """) + result = testdir.runpytest("-v", p) + result.stdout.fnmatch_lines([ + "*test_func1*1*PASS*", + "*1 passed*" + ]) + + def test_conftest_funcargs_only_available_in_subdir(testdir): sub1 = testdir.mkpydir("sub1") sub2 = 
testdir.mkpydir("sub2") Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Fri Nov 18 16:39:09 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 15:39:09 -0000 Subject: [py-svn] commit/pytest: alfredodeza: add padding to durations in rep.when Message-ID: <20111118153909.7161.23030@bitbucket01.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/b22c3b301862/ changeset: b22c3b301862 user: alfredodeza date: 2011-11-18 15:59:39 summary: add padding to durations in rep.when affected #: 1 file diff -r 90eef85d16b131a62e7112a8bfb5a0d6fddc18fa -r b22c3b301862b6cbd2593cc4933b87a190ee3fff _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -43,7 +43,7 @@ for duration, rep in d2: nodeid = rep.nodeid.replace("::()::", "::") - tr.write_line("%02.2fs %s %s" % + tr.write_line("%02.2fs %-8s %s" % (duration, rep.when, nodeid)) def pytest_sessionstart(session): Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Fri Nov 18 17:02:26 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 16:02:26 -0000 Subject: [py-svn] commit/pytest: hpk42: fix issue90 - perform teardown after its actual test function/item. This is implemented by modifying the runtestprotocol to remember "pending" teardowns and call them before the setup of the next item. Message-ID: <20111118160226.17472.73402@bitbucket03.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/e776739a7800/ changeset: e776739a7800 user: hpk42 date: 2011-11-18 17:01:29 summary: fix issue90 - perform teardown after its actual test function/item. This is implemented by modifying the runtestprotocol to remember "pending" teardowns and call them before the setup of the next item. affected #: 10 files diff -r b22c3b301862b6cbd2593cc4933b87a190ee3fff -r e776739a7800d45ba9e55c91ad33827a16b90420 CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,8 @@ Changes between 2.1.3 and XXX 2.2.0 ---------------------------------------- +- fix issue90: introduce eager tearing down of test items so that + teardown function are called earlier. - add an all-powerful metafunc.parametrize function which allows to parametrize test function arguments in multiple steps and therefore from indepdenent plugins and palces. 
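The padding commit above only changes the format string used for the
"--durations=N" report lines: the "when" column (setup/call/teardown) is now
left-justified to eight characters so the node ids line up. A tiny
illustration of the effect, with made-up numbers::

    # old format: "%02.2fs %s %s"    -> ragged third column
    # new format: "%02.2fs %-8s %s"  -> "when" padded to 8 chars, node ids aligned
    rows = [(0.52, "call", "test_x.py::test_slow"),
            (0.11, "setup", "test_x.py::test_slow"),
            (0.05, "teardown", "test_x.py::test_slow")]
    for duration, when, nodeid in rows:
        print("%02.2fs %-8s %s" % (duration, when, nodeid))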
diff -r b22c3b301862b6cbd2593cc4933b87a190ee3fff -r e776739a7800d45ba9e55c91ad33827a16b90420 _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev8' +__version__ = '2.2.0.dev9' diff -r b22c3b301862b6cbd2593cc4933b87a190ee3fff -r e776739a7800d45ba9e55c91ad33827a16b90420 _pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -163,17 +163,6 @@ def pytest_runtest_teardown(self, item): self.resumecapture_item(item) - def pytest__teardown_final(self, __multicall__, session): - method = self._getmethod(session.config, None) - self.resumecapture(method) - try: - rep = __multicall__.execute() - finally: - outerr = self.suspendcapture() - if rep: - addouterr(rep, outerr) - return rep - def pytest_keyboard_interrupt(self, excinfo): if hasattr(self, '_capturing'): self.suspendcapture() diff -r b22c3b301862b6cbd2593cc4933b87a190ee3fff -r e776739a7800d45ba9e55c91ad33827a16b90420 _pytest/main.py --- a/_pytest/main.py +++ b/_pytest/main.py @@ -82,11 +82,11 @@ session.exitstatus = EXIT_INTERNALERROR if excinfo.errisinstance(SystemExit): sys.stderr.write("mainloop: caught Spurious SystemExit!\n") + if initstate >= 2: + config.hook.pytest_sessionfinish(session=session, + exitstatus=session.exitstatus or (session._testsfailed and 1)) if not session.exitstatus and session._testsfailed: session.exitstatus = EXIT_TESTSFAILED - if initstate >= 2: - config.hook.pytest_sessionfinish(session=session, - exitstatus=session.exitstatus) if initstate >= 1: config.pluginmanager.do_unconfigure(config) return session.exitstatus @@ -106,7 +106,7 @@ def pytest_runtestloop(session): if session.config.option.collectonly: return True - for item in session.session.items: + for item in session.items: item.config.hook.pytest_runtest_protocol(item=item) if session.shouldstop: raise session.Interrupted(session.shouldstop) diff -r b22c3b301862b6cbd2593cc4933b87a190ee3fff -r e776739a7800d45ba9e55c91ad33827a16b90420 _pytest/pytester.py --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -355,9 +355,11 @@ if not plugins: plugins = [] plugins.append(Collect()) - self.pytestmain(list(args), plugins=[Collect()]) + ret = self.pytestmain(list(args), plugins=[Collect()]) + reprec = rec[0] + reprec.ret = ret assert len(rec) == 1 - return items, rec[0] + return items, reprec def parseconfig(self, *args): args = [str(x) for x in args] diff -r b22c3b301862b6cbd2593cc4933b87a190ee3fff -r e776739a7800d45ba9e55c91ad33827a16b90420 _pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -387,6 +387,7 @@ tw.line() tw.line("%s:%d" % (self.filename, self.firstlineno+1)) + class Generator(FunctionMixin, PyCollectorMixin, pytest.Collector): def collect(self): # test generators are seen as collectors but they also diff -r b22c3b301862b6cbd2593cc4933b87a190ee3fff -r e776739a7800d45ba9e55c91ad33827a16b90420 _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -60,19 +60,33 @@ def __init__(self, location): self.location = location +def perform_pending_teardown(config, nextitem): + try: + olditem, log = config._pendingteardown + except AttributeError: + pass + else: + del config._pendingteardown + olditem.nextitem = nextitem + call_and_report(olditem, "teardown", log) + def pytest_runtest_protocol(item): + perform_pending_teardown(item.config, item) item.ihook.pytest_runtest_logstart( nodeid=item.nodeid, location=item.location, ) - runtestprotocol(item) + runtestprotocol(item, teardowndelayed=True) return True -def runtestprotocol(item, log=True): +def 
runtestprotocol(item, log=True, teardowndelayed=False): rep = call_and_report(item, "setup", log) reports = [rep] if rep.passed: reports.append(call_and_report(item, "call", log)) - reports.append(call_and_report(item, "teardown", log)) + if teardowndelayed: + item.config._pendingteardown = item, log + else: + reports.append(call_and_report(item, "teardown", log)) return reports def pytest_runtest_setup(item): @@ -85,12 +99,13 @@ item.session._setupstate.teardown_exact(item) def pytest__teardown_final(session): - call = CallInfo(session._setupstate.teardown_all, when="teardown") - if call.excinfo: - ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir) - call.excinfo.traceback = ntraceback.filter() - longrepr = call.excinfo.getrepr(funcargs=True) - return TeardownErrorReport(longrepr) + perform_pending_teardown(session.config, None) + #call = CallInfo(session._setupstate.teardown_all, when="teardown") + #if call.excinfo: + # ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir) + # call.excinfo.traceback = ntraceback.filter() + # longrepr = call.excinfo.getrepr(funcargs=True) + # return TeardownErrorReport(longrepr) def pytest_report_teststatus(report): if report.when in ("setup", "teardown"): @@ -325,19 +340,28 @@ assert not self._finalizers def teardown_exact(self, item): - if self.stack and item == self.stack[-1]: + try: + colitem = item.nextitem + except AttributeError: + # in distributed testing there might be no known nexitem + # and in this case we use the parent node to at least call + # teardown of the current item + colitem = item.parent + needed_collectors = colitem and colitem.listchain() or [] + self._teardown_towards(needed_collectors) + + def _teardown_towards(self, needed_collectors): + while self.stack: + if self.stack == needed_collectors[:len(self.stack)]: + break self._pop_and_teardown() - else: - self._callfinalizers(item) def prepare(self, colitem): """ setup objects along the collector chain to the test-method and teardown previously setup objects.""" needed_collectors = colitem.listchain() - while self.stack: - if self.stack == needed_collectors[:len(self.stack)]: - break - self._pop_and_teardown() + self._teardown_towards(needed_collectors) + # check if the last collection node has raised an error for col in self.stack: if hasattr(col, '_prepare_exc'): diff -r b22c3b301862b6cbd2593cc4933b87a190ee3fff -r e776739a7800d45ba9e55c91ad33827a16b90420 doc/announce/release-2.2.0.txt --- a/doc/announce/release-2.2.0.txt +++ b/doc/announce/release-2.2.0.txt @@ -1,8 +1,8 @@ -py.test 2.2.0: improved test markers and duration profiling +py.test 2.2.0: test marking++, parametrization++ and duration profiling =========================================================================== -pytest-2.2.0 is a quite [1] backward compatible release of the popular -py.test testing tool. There are a couple of new features: +pytest-2.2.0 is a test-suite compatible release of the popular +py.test testing tool. There are a couple of new features and improvements: * "--duration=N" option showing the N slowest test execution or setup/teardown calls. @@ -16,8 +16,13 @@ a new "markers" ini-variable for registering test markers. The new "--strict" option will bail out with an error if you are using unregistered markers. +* teardown functions are now more eagerly called so that they appear + more directly connected to the last test item that needed a particular + fixture/setup. 
+ Usage of improved parametrize is documented in examples at http://pytest.org/latest/example/parametrize.html + Usages of the improved marking mechanism is illustrated by a couple of initial examples, see http://pytest.org/latest/example/markers.html @@ -40,9 +45,11 @@ holger krekel -[1] notes on incompatibility +notes on incompatibility ------------------------------ +While test suites should work unchanged you might need to upgrade plugins: + * You need a new version of the pytest-xdist plugin (1.7) for distributing test runs. diff -r b22c3b301862b6cbd2593cc4933b87a190ee3fff -r e776739a7800d45ba9e55c91ad33827a16b90420 setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev8', + version='2.2.0.dev9', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r b22c3b301862b6cbd2593cc4933b87a190ee3fff -r e776739a7800d45ba9e55c91ad33827a16b90420 testing/test_runner.py --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -160,6 +160,45 @@ #assert rep.failed.where.path.basename == "test_func.py" #assert rep.failed.failurerepr == "hello" + def test_teardown_final_returncode(self, testdir): + rec = testdir.inline_runsource(""" + def test_func(): + pass + def teardown_function(func): + raise ValueError(42) + """) + assert rec.ret == 1 + + def test_exact_teardown_issue90(self, testdir): + rec = testdir.inline_runsource(""" + import pytest + + class TestClass: + def test_method(self): + pass + def teardown_class(cls): + raise Exception() + + def test_func(): + pass + def teardown_function(func): + raise ValueError(42) + """) + reps = rec.getreports("pytest_runtest_logreport") + print (reps) + for i in range(2): + assert reps[i].nodeid.endswith("test_method") + assert reps[i].passed + assert reps[2].when == "teardown" + assert reps[2].failed + assert len(reps) == 6 + for i in range(3,5): + assert reps[i].nodeid.endswith("test_func") + assert reps[i].passed + assert reps[5].when == "teardown" + assert reps[5].nodeid.endswith("test_func") + assert reps[5].failed + def test_failure_in_setup_function_ignores_custom_repr(self, testdir): testdir.makepyfile(conftest=""" import pytest Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
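The changeset above introduces delayed ("exact") teardown: runtestprotocol no longer tears an item down immediately, but remembers it on config._pendingteardown and performs the teardown once the next test item (or the end of the session) is known, so only setup state that the next item does not share gets torn down. Below is a minimal, self-contained sketch of that idea, assuming simplified stand-in classes (Runner, Item) rather than pytest's real runner and SetupState::

    # Sketch of delayed teardown keyed on the next item.  Runner and Item are
    # illustrative stand-ins, not pytest's actual classes.
    class Item:
        def __init__(self, name, chain):
            self.name = name
            self.chain = chain          # e.g. [module, class] collectors

    class Runner:
        def __init__(self):
            self.pending = None         # item whose teardown is delayed
            self.stack = []             # collectors currently set up

        def run(self, item):
            self._finish_pending(nextitem=item)
            for col in item.chain:      # set up whatever is not set up yet
                if col not in self.stack:
                    self.stack.append(col)
                    print("setup", col)
            print("run", item.name)
            self.pending = item         # teardown happens later

        def _finish_pending(self, nextitem):
            if self.pending is None:
                return
            needed = nextitem.chain if nextitem is not None else []
            # tear down from the top of the stack until the remaining stack
            # is a prefix of what the next item needs
            while self.stack and self.stack != needed[:len(self.stack)]:
                print("teardown", self.stack.pop())
            self.pending = None

        def finish(self):
            self._finish_pending(nextitem=None)

    if __name__ == "__main__":
        r = Runner()
        r.run(Item("test_one", ["mod1", "ClassA"]))
        r.run(Item("test_two", ["mod1", "ClassA"]))   # ClassA stays set up
        r.run(Item("test_three", ["mod2"]))           # ClassA, mod1 torn down
        r.finish()

Running the sketch shows class-level setup shared by consecutive tests being torn down only once the next test no longer needs it, which is the behaviour the "more eagerly called" teardown note above refers to.
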
From commits-noreply at bitbucket.org Fri Nov 18 17:03:11 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 16:03:11 -0000 Subject: [py-svn] commit/pytest-xdist: hpk42: small fixes to test suite Message-ID: <20111118160311.17472.70425@bitbucket03.managed.contegix.com> 1 new commit in pytest-xdist: https://bitbucket.org/hpk42/pytest-xdist/changeset/42ef12993d2a/ changeset: 42ef12993d2a user: hpk42 date: 2011-11-18 17:03:02 summary: small fixes to test suite affected #: 3 files diff -r 017a73903b6f50f3ad201be5c8b7465111e5026c -r 42ef12993d2a2046db098b02cef2552df7f94d3f CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -1,7 +1,8 @@ 1.7 ------------------------- -- fix incompatibilities with pytest-2.2.0 +- fix incompatibilities with pytest-2.2.0 (allow multiple + pytest_runtest_logreport reports for a test item) 1.6 ------------------------- diff -r 017a73903b6f50f3ad201be5c8b7465111e5026c -r 42ef12993d2a2046db098b02cef2552df7f94d3f testing/test_remote.py --- a/testing/test_remote.py +++ b/testing/test_remote.py @@ -195,13 +195,13 @@ ids = ev.kwargs['ids'] assert len(ids) == 2 slave.sendcommand("runtests_all", ) + slave.sendcommand("shutdown", ) for func in "::test_func", "::test_func2": for i in range(3): # setup/call/teardown ev = slave.popevent("testreport") assert ev.name == "testreport" rep = unserialize_report(ev.name, ev.kwargs['data']) assert rep.nodeid.endswith(func) - slave.sendcommand("shutdown") ev = slave.popevent("slavefinished") assert 'slaveoutput' in ev.kwargs diff -r 017a73903b6f50f3ad201be5c8b7465111e5026c -r 42ef12993d2a2046db098b02cef2552df7f94d3f xdist/plugin.py --- a/xdist/plugin.py +++ b/xdist/plugin.py @@ -97,6 +97,7 @@ from xdist.slavemanage import unserialize_report def runforked(): try: + item.nextitem = None reports = runtestprotocol(item, log=False) except KeyboardInterrupt: py.std.os._exit(EXITSTATUS_TESTEXIT) Repository URL: https://bitbucket.org/hpk42/pytest-xdist/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
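The pytest-xdist fix above adapts the plugin to pytest-2.2.0, which now issues a pytest_runtest_logreport call for the setup, call and teardown phase of every test item. A plugin or conftest.py that previously assumed one report per test can simply filter on report.when; the snippet below is a minimal, hedged example of such a hook (the counting logic is purely illustrative and not part of either project)::

    # content of conftest.py -- count results of the 'call' phase only, so the
    # extra setup/teardown reports issued by pytest-2.2.0 do not skew numbers.
    passed = failed = 0

    def pytest_runtest_logreport(report):
        global passed, failed
        if report.when != "call":      # ignore setup/teardown reports
            return
        if report.passed:
            passed += 1
        elif report.failed:
            failed += 1

    def pytest_sessionfinish(session, exitstatus):
        print("call-phase results: %d passed, %d failed" % (passed, failed))
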
From commits-noreply at bitbucket.org Fri Nov 18 17:35:15 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 16:35:15 -0000 Subject: [py-svn] commit/pytest: hpk42: fix compat with testcases from trial-11.1.0 Message-ID: <20111118163515.7161.92600@bitbucket01.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/16af87f5ea63/ changeset: 16af87f5ea63 user: hpk42 date: 2011-11-18 17:34:46 summary: fix compat with testcases from trial-11.1.0 affected #: 2 files diff -r e776739a7800d45ba9e55c91ad33827a16b90420 -r 16af87f5ea632158d618774caa93efc71879daee CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -28,6 +28,7 @@ - fix and cleanup pytest's own test suite to not leak FDs - fix issue83: link to generated funcarg list - fix issue74: pyarg module names are now checked against imp.find_module false positives +- fix compatibility with twisted/trial-11.1.0 use cases Changes between 2.1.2 and 2.1.3 ---------------------------------------- diff -r e776739a7800d45ba9e55c91ad33827a16b90420 -r 16af87f5ea632158d618774caa93efc71879daee _pytest/unittest.py --- a/_pytest/unittest.py +++ b/_pytest/unittest.py @@ -120,14 +120,19 @@ ut = sys.modules['twisted.python.failure'] Failure__init__ = ut.Failure.__init__.im_func check_testcase_implements_trial_reporter() - def excstore(self, exc_value=None, exc_type=None, exc_tb=None): + def excstore(self, exc_value=None, exc_type=None, exc_tb=None, + captureVars=None): if exc_value is None: self._rawexcinfo = sys.exc_info() else: if exc_type is None: exc_type = type(exc_value) self._rawexcinfo = (exc_type, exc_value, exc_tb) - Failure__init__(self, exc_value, exc_type, exc_tb) + try: + Failure__init__(self, exc_value, exc_type, exc_tb, + captureVars=captureVars) + except TypeError: + Failure__init__(self, exc_value, exc_type, exc_tb) ut.Failure.__init__ = excstore try: return __multicall__.execute() Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Fri Nov 18 17:59:07 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 16:59:07 -0000 Subject: [py-svn] commit/pytest: hpk42: remove a hack that isn't needed because runtestprotocol now memorizes pending teardowns and thus dist-testing has "exact" teardowns as well Message-ID: <20111118165907.7160.38722@bitbucket01.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/c7127c5e5ee3/ changeset: c7127c5e5ee3 user: hpk42 date: 2011-11-18 17:58:21 summary: remove a hack that isn't needed because runtestprotocol now memorizes pending teardowns and thus dist-testing has "exact" teardowns as well affected #: 4 files diff -r 16af87f5ea632158d618774caa93efc71879daee -r c7127c5e5ee38d8ecb7683f5b4dd5ff44c107154 _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev9' +__version__ = '2.2.0.dev10' diff -r 16af87f5ea632158d618774caa93efc71879daee -r c7127c5e5ee38d8ecb7683f5b4dd5ff44c107154 _pytest/main.py --- a/_pytest/main.py +++ b/_pytest/main.py @@ -330,6 +330,8 @@ """ a basic test invocation item. Note that for a single function there might be multiple test invocation items. 
""" + nextitem = None + def reportinfo(self): return self.fspath, None, "" diff -r 16af87f5ea632158d618774caa93efc71879daee -r c7127c5e5ee38d8ecb7683f5b4dd5ff44c107154 _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -340,13 +340,7 @@ assert not self._finalizers def teardown_exact(self, item): - try: - colitem = item.nextitem - except AttributeError: - # in distributed testing there might be no known nexitem - # and in this case we use the parent node to at least call - # teardown of the current item - colitem = item.parent + colitem = item.nextitem needed_collectors = colitem and colitem.listchain() or [] self._teardown_towards(needed_collectors) diff -r 16af87f5ea632158d618774caa93efc71879daee -r c7127c5e5ee38d8ecb7683f5b4dd5ff44c107154 setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev9', + version='2.2.0.dev10', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Fri Nov 18 18:01:47 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 17:01:47 -0000 Subject: [py-svn] commit/pytest-xdist: hpk42: remove unncessary hack here as well Message-ID: <20111118170147.2719.74032@bitbucket02.managed.contegix.com> 1 new commit in pytest-xdist: https://bitbucket.org/hpk42/pytest-xdist/changeset/20875fed94e7/ changeset: 20875fed94e7 user: hpk42 date: 2011-11-18 18:01:39 summary: remove unncessary hack here as well affected #: 1 file diff -r 42ef12993d2a2046db098b02cef2552df7f94d3f -r 20875fed94e7f3dff50bdf762df91153b15ceca6 xdist/plugin.py --- a/xdist/plugin.py +++ b/xdist/plugin.py @@ -97,7 +97,6 @@ from xdist.slavemanage import unserialize_report def runforked(): try: - item.nextitem = None reports = runtestprotocol(item, log=False) except KeyboardInterrupt: py.std.os._exit(EXITSTATUS_TESTEXIT) Repository URL: https://bitbucket.org/hpk42/pytest-xdist/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
From commits-noreply at bitbucket.org Fri Nov 18 18:35:41 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 17:35:41 -0000 Subject: [py-svn] commit/pytest: hpk42: finally fixing a bug that resulted in sometimes-failing duration tests (doh) Message-ID: <20111118173541.1122.12453@bitbucket13.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/56fdd8bb08ca/ changeset: 56fdd8bb08ca user: hpk42 date: 2011-11-18 18:35:23 summary: finally fixing a bug that resulted in sometimes-failing duration tests (doh) affected #: 3 files diff -r c7127c5e5ee38d8ecb7683f5b4dd5ff44c107154 -r 56fdd8bb08ca14b168993d44c7dfa5147f06dfff _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev10' +__version__ = '2.2.0.dev11' diff -r c7127c5e5ee38d8ecb7683f5b4dd5ff44c107154 -r 56fdd8bb08ca14b168993d44c7dfa5147f06dfff _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -25,23 +25,22 @@ if durations is None: return tr = terminalreporter - duration2rep = {} - for key, replist in tr.stats.items(): + dlist = [] + for replist in tr.stats.values(): for rep in replist: if hasattr(rep, 'duration'): - duration2rep[rep.duration] = rep - if not duration2rep: + dlist.append((rep.duration, rep)) + if not dlist: return - d2 = list(duration2rep.items()) - d2.sort() - d2.reverse() + dlist.sort() + dlist.reverse() if not durations: tr.write_sep("=", "slowest test durations") else: tr.write_sep("=", "slowest %s test durations" % durations) - d2 = d2[:durations] + dlist = dlist[:durations] - for duration, rep in d2: + for duration, rep in dlist: nodeid = rep.nodeid.replace("::()::", "::") tr.write_line("%02.2fs %-8s %s" % (duration, rep.when, nodeid)) diff -r c7127c5e5ee38d8ecb7683f5b4dd5ff44c107154 -r 56fdd8bb08ca14b168993d44c7dfa5147f06dfff setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev10', + version='2.2.0.dev11', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
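The durations fix above replaces a dict keyed by duration with a plain list: two reports with identical durations previously collapsed into a single dict entry, so a report could silently disappear from the "--durations" table. The short, self-contained demonstration below (FakeReport is an illustrative stand-in for pytest's report objects) shows the loss and the list-based alternative; sorting by the duration attribute, as the follow-up changeset below does, also avoids comparing report objects when durations tie::

    class FakeReport(object):
        def __init__(self, nodeid, duration):
            self.nodeid = nodeid
            self.duration = duration

    reports = [FakeReport("test_a", 0.10),
               FakeReport("test_b", 0.10),   # same duration as test_a
               FakeReport("test_c", 0.25)]

    # old approach: one dict entry per duration -> test_a or test_b is lost
    duration2rep = {}
    for rep in reports:
        duration2rep[rep.duration] = rep
    print(len(duration2rep))                 # prints 2, one report dropped

    # fixed approach: keep a plain list and sort it by the duration attribute
    dlist = sorted(reports, key=lambda r: r.duration, reverse=True)
    print([(r.nodeid, r.duration) for r in dlist])   # all three reports kept
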
From commits-noreply at bitbucket.org Fri Nov 18 19:04:13 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 18:04:13 -0000 Subject: [py-svn] commit/pytest: hpk42: another try to properly fix durations sorting (still producing sometimes failing tests, apparently when two durations of a test report are identical) Message-ID: <20111118180413.7160.32494@bitbucket01.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/bc206c7d9628/ changeset: bc206c7d9628 user: hpk42 date: 2011-11-18 18:59:52 summary: another try to properly fix durations sorting (still producing sometimes failing tests, apparently when two durations of a test report are identical) affected #: 1 file diff -r 56fdd8bb08ca14b168993d44c7dfa5147f06dfff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 _pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -29,10 +29,10 @@ for replist in tr.stats.values(): for rep in replist: if hasattr(rep, 'duration'): - dlist.append((rep.duration, rep)) + dlist.append(rep) if not dlist: return - dlist.sort() + dlist.sort(key=lambda x: x.duration) dlist.reverse() if not durations: tr.write_sep("=", "slowest test durations") @@ -40,10 +40,10 @@ tr.write_sep("=", "slowest %s test durations" % durations) dlist = dlist[:durations] - for duration, rep in dlist: + for rep in dlist: nodeid = rep.nodeid.replace("::()::", "::") tr.write_line("%02.2fs %-8s %s" % - (duration, rep.when, nodeid)) + (rep.duration, rep.when, nodeid)) def pytest_sessionstart(session): session._setupstate = SetupState() Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Fri Nov 18 19:32:50 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 18:32:50 -0000 Subject: [py-svn] commit/pytest: hpk42: improve release announcement, shift and fix examples a bit. Bump version to 2.2.0 Message-ID: <20111118183250.29930.89959@bitbucket12.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/ccd5794f5850/ changeset: ccd5794f5850 user: hpk42 date: 2011-11-18 19:32:11 summary: improve release announcement, shift and fix examples a bit. Bump version to 2.2.0 affected #: 27 files diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -1,4 +1,4 @@ -Changes between 2.1.3 and XXX 2.2.0 +Changes between 2.1.3 and 2.2.0 ---------------------------------------- - fix issue90: introduce eager tearing down of test items so that diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb _pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.0.dev11' +__version__ = '2.2.0' diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb _pytest/mark.py --- a/_pytest/mark.py +++ b/_pytest/mark.py @@ -16,9 +16,9 @@ group._addoption("-m", action="store", dest="markexpr", default="", metavar="MARKEXPR", - help="only run tests which match given mark expression. " - "An expression is a python expression which can use " - "marker names.") + help="only run tests matching given mark expression. " + "example: -m 'mark1 and not mark2'." 
+ ) group.addoption("--markers", action="store_true", help= "show markers (builtin, plugin and per-project ones).") diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb _pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -39,7 +39,7 @@ config.addinivalue_line("markers", "parametrize(argnames, argvalues): call a test function multiple " "times passing in multiple different argument value sets. Example: " - "@parametrize(arg1, [1,2]) would lead to two calls of the decorated " + "@parametrize('arg1', [1,2]) would lead to two calls of the decorated " "test function, one with arg1=1 and another with arg1=2." ) diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/announce/release-2.2.0.txt --- a/doc/announce/release-2.2.0.txt +++ b/doc/announce/release-2.2.0.txt @@ -2,29 +2,30 @@ =========================================================================== pytest-2.2.0 is a test-suite compatible release of the popular -py.test testing tool. There are a couple of new features and improvements: +py.test testing tool. Plugins might need upgrades. It comes +with these improvements: -* "--duration=N" option showing the N slowest test execution - or setup/teardown calls. +* more powerful parametrization of tests: -* @pytest.mark.parametrize decorator for runnin test functions - with multiple values and a new more powerful metafunc.parametrize() - helper to be used from pytest_generate_tests. Multiple parametrize - functions can now be invoked for the same test function. + - new @pytest.mark.parametrize decorator for running test functions + - new metafunc.parametrize() API for parametrizing arguments independently + - see examples at http://pytest.org/latest/example/parametrize.html + - NOTE that parametrize() related APIs are still a bit experimental + and might change in future releases. -* "-m markexpr" option for selecting tests according to their mark and - a new "markers" ini-variable for registering test markers. The new "--strict" - option will bail out with an error if you are using unregistered markers. +* improved handling of test markers and refined marking mechanism: -* teardown functions are now more eagerly called so that they appear - more directly connected to the last test item that needed a particular - fixture/setup. + - "-m markexpr" option for selecting tests according to their mark + - a new "markers" ini-variable for registering test markers for your project + - the new "--strict" bails out with an error if using unregistered markers. + - see examples at http://pytest.org/latest/example/markers.html -Usage of improved parametrize is documented in examples at -http://pytest.org/latest/example/parametrize.html +* duration profiling: new "--duration=N" option showing the N slowest test + execution or setup/teardown calls. This is most useful if you want to + find out where your slowest test code is. -Usages of the improved marking mechanism is illustrated by a couple -of initial examples, see http://pytest.org/latest/example/markers.html +* also 2.2.0 performs more eager calling of teardown/finalizers functions + resulting in better and more accurate reporting when they fail Besides there is the usual set of bug fixes along with a cleanup of pytest's own test suite allowing it to run on a wider range of environments. 
@@ -38,8 +39,8 @@ pip install -U pytest # or easy_install -U pytest -Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, XXX for their -help and feedback on various issues. +Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, +Alfredo Doza and all who gave feedback or sent bug reports. best, holger krekel @@ -55,8 +56,41 @@ * Other plugins might need an upgrade if they implement the ``pytest_runtest_logreport`` hook which now is called unconditionally - for the setup/teardown fixture phases of a test. You can just choose to - ignore them by inserting "if rep.when != 'call': return". Note that - most code probably "just" works because the hook was already called - for failing setup/teardown phases of a test. + for the setup/teardown fixture phases of a test. You may choose to + ignore setup/teardown failures by inserting "if rep.when != 'call': return" + or something similar. Note that most code probably "just" works because + the hook was already called for failing setup/teardown phases of a test + so a plugin should have been ready to grok such reports already. + +Changes between 2.1.3 and 2.2.0 +---------------------------------------- + +- fix issue90: introduce eager tearing down of test items so that + teardown function are called earlier. +- add an all-powerful metafunc.parametrize function which allows to + parametrize test function arguments in multiple steps and therefore + from indepdenent plugins and palces. +- add a @pytest.mark.parametrize helper which allows to easily + call a test function with different argument values +- Add examples to the "parametrize" example page, including a quick port + of Test scenarios and the new parametrize function and decorator. +- introduce registration for "pytest.mark.*" helpers via ini-files + or through plugin hooks. Also introduce a "--strict" option which + will treat unregistered markers as errors + allowing to avoid typos and maintain a well described set of markers + for your test suite. See exaples at http://pytest.org/latest/mark.html + and its links. +- issue50: introduce "-m marker" option to select tests based on markers + (this is a stricter and more predictable version of '-k' in that "-m" + only matches complete markers and has more obvious rules for and/or + semantics. +- new feature to help optimizing the speed of your tests: + --durations=N option for displaying N slowest test calls + and setup/teardown methods. +- fix issue87: --pastebin now works with python3 +- fix issue89: --pdb with unexpected exceptions in doctest work more sensibly +- fix and cleanup pytest's own test suite to not leak FDs +- fix issue83: link to generated funcarg list +- fix issue74: pyarg module names are now checked against imp.find_module false positives +- fix compatibility with twisted/trial-11.1.0 use cases diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/assert.txt --- a/doc/assert.txt +++ b/doc/assert.txt @@ -23,7 +23,7 @@ $ py.test test_assert1.py =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... 
collected 1 items test_assert1.py F @@ -37,7 +37,7 @@ E + where 3 = f() test_assert1.py:5: AssertionError - ========================= 1 failed in 0.03 seconds ========================= + ========================= 1 failed in 0.02 seconds ========================= py.test has support for showing the values of the most common subexpressions including calls, attributes, comparisons, and binary and unary @@ -105,7 +105,7 @@ $ py.test test_assert2.py =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 1 items test_assert2.py F diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/builtin.txt --- a/doc/builtin.txt +++ b/doc/builtin.txt @@ -28,7 +28,7 @@ $ py.test --funcargs =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collected 0 items pytestconfig the pytest config object with access to command line opts. @@ -75,7 +75,5 @@ See http://docs.python.org/library/warnings.html for information on warning categories. - cov - A pytest funcarg that provides access to the underlying coverage object. ============================= in 0.00 seconds ============================= diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/capture.txt --- a/doc/capture.txt +++ b/doc/capture.txt @@ -64,7 +64,7 @@ $ py.test =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 2 items test_module.py .F @@ -78,8 +78,8 @@ test_module.py:9: AssertionError ----------------------------- Captured stdout ------------------------------ - setting up - ==================== 1 failed, 1 passed in 0.03 seconds ==================== + setting up + ==================== 1 failed, 1 passed in 0.02 seconds ==================== Accessing captured output from a test function --------------------------------------------------- diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/doctest.txt --- a/doc/doctest.txt +++ b/doc/doctest.txt @@ -44,10 +44,9 @@ $ py.test =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 1 items mymodule.py . - ========================= 1 passed in 0.06 seconds ========================= - [?1034h \ No newline at end of file + ========================= 1 passed in 0.05 seconds ========================= diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/example/markers.txt --- a/doc/example/markers.txt +++ b/doc/example/markers.txt @@ -1,9 +1,178 @@ + +.. _`mark examples`: Working with custom markers ================================================= +Here are some example using the :ref:`mark` mechanism. -Here are some example using the :ref:`mark` mechanism. 
+marking test functions and selecting them for a run +---------------------------------------------------- + +You can "mark" a test function with custom metadata like this:: + + # content of test_server.py + + import pytest + @pytest.mark.webtest + def test_send_http(): + pass # perform some webtest test for your app + def test_something_quick(): + pass + +.. versionadded:: 2.2 + +You can then restrict a test run to only run tests marked with ``webtest``:: + + $ py.test -v -m webtest + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python + collecting ... collected 2 items + + test_server.py:3: test_send_http PASSED + + =================== 1 tests deselected by "-m 'webtest'" =================== + ================== 1 passed, 1 deselected in 0.01 seconds ================== + +Or the inverse, running all tests except the webtest ones:: + + $ py.test -v -m "not webtest" + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python + collecting ... collected 2 items + + test_server.py:6: test_something_quick PASSED + + ================= 1 tests deselected by "-m 'not webtest'" ================= + ================== 1 passed, 1 deselected in 0.01 seconds ================== + +Registering markers +------------------------------------- + +.. versionadded:: 2.2 + +.. ini-syntax for custom markers: + +Registering markers for your test suite is simple:: + + # content of pytest.ini + [pytest] + markers = + webtest: mark a test as a webtest. + +You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` markers:: + + $ py.test --markers + @pytest.mark.webtest: mark a test as a webtest. + + @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. + + @pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied. + + @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in multiple different argument value sets. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2. + + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. + + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. + + +For an example on how to add and work with markers from a plugin, see +:ref:`adding a custom marker from a plugin`. + +.. note:: + + It is recommended to explicitely register markers so that: + + * there is one place in your test suite defining your markers + + * asking for existing markers via ``py.test --markers`` gives good output + + * typos in function markers are treated as an error if you use + the ``--strict`` option. Later versions of py.test are probably + going to treat non-registered markers as an error. + +.. 
_`scoped-marking`: + +Marking whole classes or modules +---------------------------------------------------- + +If you are programming with Python2.6 you may use ``pytest.mark`` decorators +with classes to apply markers to all of its test methods:: + + # content of test_mark_classlevel.py + import pytest + @pytest.mark.webtest + class TestClass: + def test_startup(self): + pass + def test_startup_and_more(self): + pass + +This is equivalent to directly applying the decorator to the +two test functions. + +To remain backward-compatible with Python2.4 you can also set a +``pytestmark`` attribute on a TestClass like this:: + + import pytest + + class TestClass: + pytestmark = pytest.mark.webtest + +or if you need to use multiple markers you can use a list:: + + import pytest + + class TestClass: + pytestmark = [pytest.mark.webtest, pytest.mark.slowtest] + +You can also set a module level marker:: + + import pytest + pytestmark = pytest.mark.webtest + +in which case it will be applied to all functions and +methods defined in the module. + +Using ``-k TEXT`` to select tests +---------------------------------------------------- + +You can use the ``-k`` command line option to only run tests with names that match the given argument:: + + $ py.test -k send_http # running with the above defined examples + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 + collecting ... collected 4 items + + test_server.py . + + =================== 3 tests deselected by '-ksend_http' ==================== + ================== 1 passed, 3 deselected in 0.02 seconds ================== + +And you can also run all tests except the ones that match the keyword:: + + $ py.test -k-send_http + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 + collecting ... collected 4 items + + test_mark_classlevel.py .. + test_server.py . + + =================== 1 tests deselected by '-k-send_http' =================== + ================== 3 passed, 1 deselected in 0.03 seconds ================== + +Or to only select the class:: + + $ py.test -kTestClass + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 + collecting ... collected 4 items + + test_mark_classlevel.py .. + + =================== 2 tests deselected by '-kTestClass' ==================== + ================== 2 passed, 2 deselected in 0.02 seconds ================== .. _`adding a custom marker from a plugin`: @@ -49,34 +218,42 @@ the test needs:: $ py.test -E stage2 - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 - collecting ... collected 1 items + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 + collecting ... collected 5 items + test_mark_classlevel.py .. + test_server.py .. test_someenv.py s - ========================== 1 skipped in 0.02 seconds =========================== + =================== 4 passed, 1 skipped in 0.04 seconds ==================== and here is one that specifies exactly the environment needed:: $ py.test -E stage1 - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 - collecting ... 
collected 1 items + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 + collecting ... collected 5 items + test_mark_classlevel.py .. + test_server.py .. test_someenv.py . - =========================== 1 passed in 0.02 seconds =========================== + ========================= 5 passed in 0.04 seconds ========================= The ``--markers`` option always gives you a list of available markers:: $ py.test --markers + @pytest.mark.webtest: mark a test as a webtest. + @pytest.mark.env(name): mark test to run only on named environment @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. @pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied. + @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in multiple different argument value sets. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2. + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/example/multipython.py --- a/doc/example/multipython.py +++ b/doc/example/multipython.py @@ -2,18 +2,20 @@ module containing a parametrized tests testing cross-python serialization via the pickle module. """ -import py +import py, pytest pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8'] def pytest_generate_tests(metafunc): + # we parametrize all "python1" and "python2" arguments to iterate + # over the python interpreters of our list above - the actual + # setup and lookup of interpreters in the python1/python2 factories + # respectively. for arg in metafunc.funcargnames: - if arg.startswith("python"): + if arg in ("python1", "python2"): metafunc.parametrize(arg, pythonlist, indirect=True) - elif arg == "obj": - metafunc.parametrize("obj", metafunc.function.multiarg.kwargs['obj']) - at py.test.mark.multiarg(obj=[42, {}, {1:3},]) + at pytest.mark.parametrize("obj", [42, {}, {1:3},]) def test_basic_objects(python1, python2, obj): python1.dumps(obj) python2.load_and_is_true("obj == %s" % obj) diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/example/mysetup.txt --- a/doc/example/mysetup.txt +++ b/doc/example/mysetup.txt @@ -49,7 +49,7 @@ $ py.test test_sample.py =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... 
collected 1 items test_sample.py F @@ -57,7 +57,7 @@ ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - mysetup = + mysetup = def test_answer(mysetup): app = mysetup.myapp() @@ -122,12 +122,12 @@ $ py.test test_ssh.py -rs =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 1 items test_ssh.py s ========================= short test summary info ========================== - SKIP [1] /Users/hpk/tmp/doc-exec-167/conftest.py:22: specify ssh host with --ssh + SKIP [1] /Users/hpk/tmp/doc-exec-625/conftest.py:22: specify ssh host with --ssh ======================== 1 skipped in 0.02 seconds ========================= diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/example/nonpython.txt --- a/doc/example/nonpython.txt +++ b/doc/example/nonpython.txt @@ -27,7 +27,7 @@ nonpython $ py.test test_simple.yml =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 2 items test_simple.yml .F @@ -37,7 +37,7 @@ usecase execution failed spec failed: 'some': 'other' no further details known at this point. - ==================== 1 failed, 1 passed in 0.09 seconds ==================== + ==================== 1 failed, 1 passed in 0.10 seconds ==================== You get one dot for the passing ``sub1: sub1`` check and one failure. Obviously in the above ``conftest.py`` you'll want to implement a more @@ -56,7 +56,7 @@ nonpython $ py.test -v =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 -- /Users/hpk/venv/0/bin/python + platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python collecting ... collected 2 items test_simple.yml:1: usecase: ok PASSED @@ -74,7 +74,7 @@ nonpython $ py.test --collectonly =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 2 items diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/example/parametrize.txt --- a/doc/example/parametrize.txt +++ b/doc/example/parametrize.txt @@ -17,8 +17,10 @@ .. versionadded:: 2.2 -The builtin ``parametrize`` marker allows you to easily write generic -test functions that will be invoked with multiple input/output values:: +The builtin ``pytest.mark.parametrize`` decorator directly enables +parametrization of arguments for a test function. Here is an example +of a test function that wants to compare that processing some input +results in expected output:: # content of test_expectation.py import pytest @@ -30,14 +32,14 @@ def test_eval(input, expected): assert eval(input) == expected -Here we parametrize two arguments of the test function so that the test +we parametrize two arguments of the test function so that the test function is called three times. Let's run it:: $ py.test -q collecting ... 
collected 3 items ..F - =================================== FAILURES =================================== - ______________________________ test_eval[6*9-42] _______________________________ + ================================= FAILURES ================================= + ____________________________ test_eval[6*9-42] _____________________________ input = '6*9', expected = 42 @@ -51,7 +53,7 @@ E assert 54 == 42 E + where 54 = eval('6*9') - test_expectation.py:9: AssertionError + test_expectation.py:8: AssertionError 1 failed, 2 passed in 0.03 seconds As expected only one pair of input/output values fails the simple test function. @@ -102,8 +104,8 @@ $ py.test -q --all collecting ... collected 5 items ....F - =================================== FAILURES =================================== - _______________________________ test_compute[4] ________________________________ + ================================= FAILURES ================================= + _____________________________ test_compute[4] ______________________________ param1 = 4 @@ -151,20 +153,20 @@ this is a fully self-contained example which you can run with:: $ py.test test_scenarios.py - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 2 items test_scenarios.py .. - =========================== 2 passed in 0.02 seconds =========================== + ========================= 2 passed in 0.02 seconds ========================= If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: $ py.test --collectonly test_scenarios.py - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 2 items @@ -172,7 +174,7 @@ - =============================== in 0.01 seconds =============================== + ============================= in 0.01 seconds ============================= Deferring the setup of parametrized resources --------------------------------------------------- @@ -219,24 +221,24 @@ Let's first see how it looks like at collection time:: $ py.test test_backends.py --collectonly - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 2 items - =============================== in 0.01 seconds =============================== + ============================= in 0.01 seconds ============================= And then when we run the test:: $ py.test -q test_backends.py collecting ... collected 2 items .F - =================================== FAILURES =================================== - ___________________________ test_db_initialized[d2] ____________________________ + ================================= FAILURES ================================= + _________________________ test_db_initialized[d2] __________________________ - db = + db = def test_db_initialized(db): # a dummy test @@ -290,10 +292,10 @@ $ py.test -q collecting ... collected 3 items F.. 
- =================================== FAILURES =================================== - __________________________ TestClass.test_equals[1-2] __________________________ + ================================= FAILURES ================================= + ________________________ TestClass.test_equals[1-2] ________________________ - self = , a = 1, b = 2 + self = , a = 1, b = 2 def test_equals(self, a, b): > assert a == b @@ -302,13 +304,13 @@ test_parametrize.py:18: AssertionError 1 failed, 2 passed in 0.03 seconds -Checking serialization between Python interpreters +Indirect parametrization with multiple resources -------------------------------------------------------------- Here is a stripped down real-life example of using parametrized testing for testing serialization, invoking different python interpreters. We define a ``test_basic_objects`` function which is to be run -with different sets of arguments for its three arguments:: +with different sets of arguments for its three arguments: * ``python1``: first python interpreter, run to pickle-dump an object to a file * ``python2``: second interpreter, run to pickle-load an object from a file @@ -316,9 +318,12 @@ .. literalinclude:: multipython.py -Running it (with Python-2.4 through to Python2.7 installed):: +Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize):: - . $ py.test -q multipython.py + . $ py.test -rs -q multipython.py collecting ... collected 75 items ssssssssssssssssss.........ssssss.........ssssss.........ssssssssssssssssss - 27 passed, 48 skipped in 4.87 seconds + ========================= short test summary info ========================== + SKIP [24] /Users/hpk/p/pytest/doc/example/multipython.py:36: 'python2.8' not found + SKIP [24] /Users/hpk/p/pytest/doc/example/multipython.py:36: 'python2.4' not found + 27 passed, 48 skipped in 3.03 seconds diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/example/pythoncollection.txt --- a/doc/example/pythoncollection.txt +++ b/doc/example/pythoncollection.txt @@ -43,7 +43,7 @@ $ py.test --collectonly =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 2 items @@ -82,7 +82,7 @@ . $ py.test --collectonly pythoncollection.py =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 3 items diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/example/reportingdemo.txt --- a/doc/example/reportingdemo.txt +++ b/doc/example/reportingdemo.txt @@ -13,7 +13,7 @@ assertion $ py.test failure_demo.py =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... 
collected 39 items failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF @@ -30,7 +30,7 @@ failure_demo.py:15: AssertionError _________________________ TestFailing.test_simple __________________________ - self = + self = def test_simple(self): def f(): @@ -40,13 +40,13 @@ > assert f() == g() E assert 42 == 43 - E + where 42 = () - E + and 43 = () + E + where 42 = () + E + and 43 = () failure_demo.py:28: AssertionError ____________________ TestFailing.test_simple_multiline _____________________ - self = + self = def test_simple_multiline(self): otherfunc_multi( @@ -66,19 +66,19 @@ failure_demo.py:11: AssertionError ___________________________ TestFailing.test_not ___________________________ - self = + self = def test_not(self): def f(): return 42 > assert not f() E assert not 42 - E + where 42 = () + E + where 42 = () failure_demo.py:38: AssertionError _________________ TestSpecialisedExplanations.test_eq_text _________________ - self = + self = def test_eq_text(self): > assert 'spam' == 'eggs' @@ -89,7 +89,7 @@ failure_demo.py:42: AssertionError _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ - self = + self = def test_eq_similar_text(self): > assert 'foo 1 bar' == 'foo 2 bar' @@ -102,7 +102,7 @@ failure_demo.py:45: AssertionError ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ - self = + self = def test_eq_multiline_text(self): > assert 'foo\nspam\nbar' == 'foo\neggs\nbar' @@ -115,7 +115,7 @@ failure_demo.py:48: AssertionError ______________ TestSpecialisedExplanations.test_eq_long_text _______________ - self = + self = def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 @@ -132,7 +132,7 @@ failure_demo.py:53: AssertionError _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ - self = + self = def test_eq_long_text_multiline(self): a = '1\n'*100 + 'a' + '2\n'*100 @@ -156,7 +156,7 @@ failure_demo.py:58: AssertionError _________________ TestSpecialisedExplanations.test_eq_list _________________ - self = + self = def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] @@ -166,7 +166,7 @@ failure_demo.py:61: AssertionError ______________ TestSpecialisedExplanations.test_eq_list_long _______________ - self = + self = def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 @@ -178,7 +178,7 @@ failure_demo.py:66: AssertionError _________________ TestSpecialisedExplanations.test_eq_dict _________________ - self = + self = def test_eq_dict(self): > assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2} @@ -191,7 +191,7 @@ failure_demo.py:69: AssertionError _________________ TestSpecialisedExplanations.test_eq_set __________________ - self = + self = def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) @@ -207,7 +207,7 @@ failure_demo.py:72: AssertionError _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ - self = + self = def test_eq_longer_list(self): > assert [1,2] == [1,2,3] @@ -217,7 +217,7 @@ failure_demo.py:75: AssertionError _________________ TestSpecialisedExplanations.test_in_list _________________ - self = + self = def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] @@ -226,7 +226,7 @@ failure_demo.py:78: AssertionError __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ - self = + self = def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' @@ -244,7 +244,7 @@ failure_demo.py:82: AssertionError ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ - self = + self = 
def test_not_in_text_single(self): text = 'single foo line' @@ -257,7 +257,7 @@ failure_demo.py:86: AssertionError _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ - self = + self = def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 @@ -270,7 +270,7 @@ failure_demo.py:90: AssertionError ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ - self = + self = def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 @@ -289,7 +289,7 @@ i = Foo() > assert i.b == 2 E assert 1 == 2 - E + where 1 = .b + E + where 1 = .b failure_demo.py:101: AssertionError _________________________ test_attribute_instance __________________________ @@ -299,8 +299,8 @@ b = 1 > assert Foo().b == 2 E assert 1 == 2 - E + where 1 = .b - E + where = () + E + where 1 = .b + E + where = () failure_demo.py:107: AssertionError __________________________ test_attribute_failure __________________________ @@ -316,7 +316,7 @@ failure_demo.py:116: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - self = + self = def _get_b(self): > raise Exception('Failed to get attrib') @@ -332,15 +332,15 @@ b = 2 > assert Foo().b == Bar().b E assert 1 == 2 - E + where 1 = .b - E + where = () - E + and 2 = .b - E + where = () + E + where 1 = .b + E + where = () + E + and 2 = .b + E + where = () failure_demo.py:124: AssertionError __________________________ TestRaises.test_raises __________________________ - self = + self = def test_raises(self): s = 'qwe' @@ -352,10 +352,10 @@ > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - <0-codegen /Users/hpk/p/pytest/_pytest/python.py:833>:1: ValueError + <0-codegen /Users/hpk/p/pytest/_pytest/python.py:957>:1: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ - self = + self = def test_raises_doesnt(self): > raises(IOError, "int('3')") @@ -364,7 +364,7 @@ failure_demo.py:136: Failed __________________________ TestRaises.test_raise ___________________________ - self = + self = def test_raise(self): > raise ValueError("demo error") @@ -373,7 +373,7 @@ failure_demo.py:139: ValueError ________________________ TestRaises.test_tupleerror ________________________ - self = + self = def test_tupleerror(self): > a,b = [1] @@ -382,7 +382,7 @@ failure_demo.py:142: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ - self = + self = def test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] @@ -395,7 +395,7 @@ l is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ - self = + self = def test_some_error(self): > if namenotexi: @@ -423,7 +423,7 @@ <2-codegen 'abc-123' /Users/hpk/p/pytest/doc/example/assertion/failure_demo.py:162>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ - self = + self = def test_complex_error(self): def f(): @@ -452,7 +452,7 @@ failure_demo.py:5: AssertionError ___________________ TestMoreErrors.test_z1_unpack_error ____________________ - self = + self = def test_z1_unpack_error(self): l = [] @@ -462,7 +462,7 @@ failure_demo.py:179: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ - self = + self = def test_z2_type_error(self): l = 3 @@ -472,19 +472,19 @@ failure_demo.py:183: TypeError ______________________ TestMoreErrors.test_startswith ______________________ - self = + self = def test_startswith(self): s = "123" g = 
"456" > assert s.startswith(g) - E assert ('456') - E + where = '123'.startswith + E assert ('456') + E + where = '123'.startswith failure_demo.py:188: AssertionError __________________ TestMoreErrors.test_startswith_nested ___________________ - self = + self = def test_startswith_nested(self): def f(): @@ -492,15 +492,15 @@ def g(): return "456" > assert f().startswith(g()) - E assert ('456') - E + where = '123'.startswith - E + where '123' = () - E + and '456' = () + E assert ('456') + E + where = '123'.startswith + E + where '123' = () + E + and '456' = () failure_demo.py:195: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ - self = + self = def test_global_func(self): > assert isinstance(globf(42), float) @@ -510,18 +510,18 @@ failure_demo.py:198: AssertionError _______________________ TestMoreErrors.test_instance _______________________ - self = + self = def test_instance(self): self.x = 6*7 > assert self.x != 42 E assert 42 != 42 - E + where 42 = .x + E + where 42 = .x failure_demo.py:202: AssertionError _______________________ TestMoreErrors.test_compare ________________________ - self = + self = def test_compare(self): > assert globf(10) < 5 @@ -531,7 +531,7 @@ failure_demo.py:205: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ - self = + self = def test_try_finally(self): x = 1 @@ -540,4 +540,4 @@ E assert 1 == 0 failure_demo.py:210: AssertionError - ======================== 39 failed in 0.39 seconds ========================= + ======================== 39 failed in 0.41 seconds ========================= diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/example/simple.txt --- a/doc/example/simple.txt +++ b/doc/example/simple.txt @@ -109,13 +109,13 @@ $ py.test =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 gw0 I gw0 [0] scheduling tests via LoadScheduling - ============================= in 0.48 seconds ============================= + ============================= in 0.71 seconds ============================= .. _`excontrolskip`: @@ -156,12 +156,12 @@ $ py.test -rs # "-rs" means report details on the little 's' =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 2 items test_module.py .s ========================= short test summary info ========================== - SKIP [1] /Users/hpk/tmp/doc-exec-172/conftest.py:9: need --runslow option to run + SKIP [1] /Users/hpk/tmp/doc-exec-630/conftest.py:9: need --runslow option to run =================== 1 passed, 1 skipped in 0.02 seconds ==================== @@ -169,12 +169,12 @@ $ py.test --runslow =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 2 items test_module.py .. 
- ========================= 2 passed in 0.02 seconds ========================= + ========================= 2 passed in 0.62 seconds ========================= Writing well integrated assertion helpers -------------------------------------------------- @@ -261,7 +261,7 @@ $ py.test =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 project deps: mylib-1.1 collecting ... collected 0 items @@ -284,7 +284,7 @@ $ py.test -v =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 -- /Users/hpk/venv/0/bin/python + platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python info1: did you know that ... did you? collecting ... collected 0 items @@ -295,7 +295,7 @@ $ py.test =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 0 items ============================= in 0.00 seconds ============================= @@ -326,3 +326,14 @@ Now we can profile which test functions execute slowest:: $ py.test --durations=3 + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 + collecting ... collected 3 items + + test_some_are_slow.py ... + + ========================= slowest 3 test durations ========================= + 0.20s call test_some_are_slow.py::test_funcslow2 + 0.10s call test_some_are_slow.py::test_funcslow1 + 0.00s setup test_some_are_slow.py::test_funcfast + ========================= 3 passed in 0.32 seconds ========================= diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/funcargs.txt --- a/doc/funcargs.txt +++ b/doc/funcargs.txt @@ -61,14 +61,14 @@ Running the test looks like this:: $ py.test test_simplefactory.py - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 1 items test_simplefactory.py F - =================================== FAILURES =================================== - ________________________________ test_function _________________________________ + ================================= FAILURES ================================= + ______________________________ test_function _______________________________ myfuncarg = 42 @@ -77,7 +77,7 @@ E assert 42 == 17 test_simplefactory.py:5: AssertionError - =========================== 1 failed in 0.02 seconds =========================== + ========================= 1 failed in 0.03 seconds ========================= This means that indeed the test function was called with a ``myfuncarg`` argument value of ``42`` and the assert fails. Here is how py.test @@ -166,14 +166,14 @@ Running this will generate ten invocations of ``test_func`` passing in each of the items in the list of ``range(10)``:: $ py.test test_example.py - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... 
collected 10 items test_example.py .........F - =================================== FAILURES =================================== - _________________________________ test_func[9] _________________________________ + ================================= FAILURES ================================= + _______________________________ test_func[9] _______________________________ numiter = 9 @@ -182,15 +182,15 @@ E assert 9 < 9 test_example.py:6: AssertionError - ====================== 1 failed, 9 passed in 0.07 seconds ====================== + ==================== 1 failed, 9 passed in 0.05 seconds ==================== Obviously, only when ``numiter`` has the value of ``9`` does the test fail. Note that the ``pytest_generate_tests(metafunc)`` hook is called during the test collection phase which is separate from the actual test running. Let's just look at what is collected:: $ py.test --collectonly test_example.py - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 10 items @@ -204,19 +204,19 @@ - =============================== in 0.01 seconds =============================== + ============================= in 0.01 seconds ============================= If you want to select only the run with the value ``7`` you could do:: $ py.test -v -k 7 test_example.py # or -k test_func[7] - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 -- /Users/hpk/venv/1/bin/python + =========================== test session starts ============================ + platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python collecting ... collected 10 items test_example.py:5: test_func[7] PASSED - ========================= 9 tests deselected by '-k7' ========================== - ==================== 1 passed, 9 deselected in 0.01 seconds ==================== + ======================= 9 tests deselected by '-k7' ======================== + ================== 1 passed, 9 deselected in 0.02 seconds ================== You might want to look at :ref:`more parametrization examples `. diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/getting-started.txt --- a/doc/getting-started.txt +++ b/doc/getting-started.txt @@ -22,10 +22,9 @@ To check your installation has installed the correct version:: $ py.test --version - This is py.test version 2.1.3, imported from /Users/hpk/p/pytest/pytest.pyc + This is py.test version 2.2.0, imported from /Users/hpk/p/pytest/pytest.pyc setuptools registered plugins: - pytest-cov-1.4 at /Users/hpk/venv/0/lib/python2.7/site-packages/pytest_cov.pyc - pytest-xdist-1.6 at /Users/hpk/venv/0/lib/python2.7/site-packages/xdist/plugin.pyc + pytest-xdist-1.7.dev1 at /Users/hpk/p/pytest-xdist/xdist/plugin.pyc If you get an error checkout :ref:`installation issues`. @@ -47,7 +46,7 @@ $ py.test =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... 
collected 1 items test_sample.py F @@ -61,7 +60,7 @@ E + where 4 = func(3) test_sample.py:5: AssertionError - ========================= 1 failed in 0.02 seconds ========================= + ========================= 1 failed in 0.04 seconds ========================= py.test found the ``test_answer`` function by following :ref:`standard test discovery rules `, basically detecting the ``test_`` prefixes. We got a failure report because our little ``func(3)`` call did not return ``5``. @@ -127,7 +126,7 @@ ================================= FAILURES ================================= ____________________________ TestClass.test_two ____________________________ - self = + self = def test_two(self): x = "hello" @@ -164,7 +163,7 @@ ================================= FAILURES ================================= _____________________________ test_needsfiles ______________________________ - tmpdir = local('/Users/hpk/tmp/pytest-93/test_needsfiles0') + tmpdir = local('/Users/hpk/tmp/pytest-1595/test_needsfiles0') def test_needsfiles(tmpdir): print tmpdir @@ -173,8 +172,8 @@ test_tmpdir.py:3: AssertionError ----------------------------- Captured stdout ------------------------------ - /Users/hpk/tmp/pytest-93/test_needsfiles0 - 1 failed in 0.04 seconds + /Users/hpk/tmp/pytest-1595/test_needsfiles0 + 1 failed in 0.15 seconds Before the test runs, a unique-per-test-invocation temporary directory was created. More info at :ref:`tmpdir handling`. diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/index.txt --- a/doc/index.txt +++ b/doc/index.txt @@ -27,13 +27,13 @@ - (new in 2.2) :ref:`durations` - (much improved in 2.2) :ref:`marking and test selection ` + - (improved in 2.2) :ref:`parametrized test functions ` - advanced :ref:`skip and xfail` + - unique :ref:`dependency injection through funcargs ` - can :ref:`distribute tests to multiple CPUs ` through :ref:`xdist plugin ` - can :ref:`continuously re-run failing tests ` - many :ref:`builtin helpers ` - flexible :ref:`Python test discovery` - - unique :ref:`dependency injection through funcargs ` - - :ref:`parametrized test functions ` - **integrates many common testing methods** diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/mark.txt --- a/doc/mark.txt +++ b/doc/mark.txt @@ -7,7 +7,7 @@ .. currentmodule:: _pytest.mark By using the ``pytest.mark`` helper you can easily set -metadata on your test functions. To begin with, there are +metadata on your test functions. There are some builtin markers, for example: * :ref:`skipif ` - skip a test function if a certain condition is met @@ -16,174 +16,10 @@ * :ref:`parametrize ` to perform multiple calls to the same test function. -It's also easy to create custom markers or to apply markers -to whole test classes or modules. +It's easy to create custom markers or to apply markers +to whole test classes or modules. See :ref:`mark examples` for examples +which also serve as documentation. -marking test functions and selecting them for a run ----------------------------------------------------- - -You can "mark" a test function with custom metadata like this:: - - # content of test_server.py - - import pytest - @pytest.mark.webtest - def test_send_http(): - pass # perform some webtest test for your app - def test_something_quick(): - pass - -.. 
versionadded:: 2.2 - -You can then restrict a test run to only run tests marked with ``webtest``:: - - $ py.test -v -m webtest - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 -- /Users/hpk/venv/0/bin/python - collecting ... collected 2 items - - test_server.py:3: test_send_http PASSED - - ===================== 1 tests deselected by "-m 'webtest'" ===================== - ==================== 1 passed, 1 deselected in 0.01 seconds ==================== - -Or the inverse, running all tests except the webtest ones:: - - $ py.test -v -m "not webtest" - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 -- /Users/hpk/venv/0/bin/python - collecting ... collected 2 items - - test_server.py:6: test_something_quick PASSED - - =================== 1 tests deselected by "-m 'not webtest'" =================== - ==================== 1 passed, 1 deselected in 0.01 seconds ==================== - -Registering markers -------------------------------------- - -.. versionadded:: 2.2 - -.. ini-syntax for custom markers: - -Registering markers for your test suite is simple:: - - # content of pytest.ini - [pytest] - markers = - webtest: mark a test as a webtest. - -You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` markers:: - - $ py.test --markers - @pytest.mark.webtest: mark a test as a webtest. - - @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. - - @pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied. - - @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - - @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. - - -For an example on how to add and work markers from a plugin, see -:ref:`adding a custom marker from a plugin`. - -.. note:: - - It is recommended to explicitely register markers so that: - - * there is one place in your test suite defining your markers - - * asking for existing markers via ``py.test --markers`` gives good output - - * typos in function markers are treated as an error if you use - the ``--strict`` option. Later versions of py.test are probably - going to treat non-registered markers as an error. - -.. _`scoped-marking`: - -Marking whole classes or modules ----------------------------------------------------- - -If you are programming with Python2.6 you may use ``pytest.mark`` decorators -with classes to apply markers to all of its test methods:: - - # content of test_mark_classlevel.py - import pytest - @pytest.mark.webtest - class TestClass: - def test_startup(self): - pass - def test_startup_and_more(self): - pass - -This is equivalent to directly applying the decorator to the -two test functions. 
- -To remain backward-compatible with Python2.4 you can also set a -``pytestmark`` attribute on a TestClass like this:: - - import pytest - - class TestClass: - pytestmark = pytest.mark.webtest - -or if you need to use multiple markers you can use a list:: - - import pytest - - class TestClass: - pytestmark = [pytest.mark.webtest, pytest.mark.slowtest] - -You can also set a module level marker:: - - import pytest - pytestmark = pytest.mark.webtest - -in which case it will be applied to all functions and -methods defined in the module. - -Using ``-k TEXT`` to select tests ----------------------------------------------------- - -You can use the ``-k`` command line option to only run tests with names that match the given argument:: - - $ py.test -k send_http # running with the above defined examples - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 - collecting ... collected 4 items - - test_server.py . - - ===================== 3 tests deselected by '-ksend_http' ====================== - ==================== 1 passed, 3 deselected in 0.02 seconds ==================== - -And you can also run all tests except the ones that match the keyword:: - - $ py.test -k-send_http - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 - collecting ... collected 4 items - - test_mark_classlevel.py .. - test_server.py . - - ===================== 1 tests deselected by '-k-send_http' ===================== - ==================== 3 passed, 1 deselected in 0.03 seconds ==================== - -Or to only select the class:: - - $ py.test -kTestClass - ============================= test session starts ============================== - platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 - collecting ... collected 4 items - - test_mark_classlevel.py .. - - ===================== 2 tests deselected by '-kTestClass' ====================== - ==================== 2 passed, 2 deselected in 0.02 seconds ==================== API reference for mark related objects ------------------------------------------------ diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/monkeypatch.txt --- a/doc/monkeypatch.txt +++ b/doc/monkeypatch.txt @@ -39,10 +39,10 @@ .. background check: $ py.test =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 0 items - ============================= in 0.00 seconds ============================= + ============================= in 0.20 seconds ============================= Method reference of the monkeypatch function argument ----------------------------------------------------- diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/skipping.txt --- a/doc/skipping.txt +++ b/doc/skipping.txt @@ -130,7 +130,7 @@ example $ py.test -rx xfail_demo.py =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 6 items xfail_demo.py xxxxxx @@ -147,7 +147,7 @@ XFAIL xfail_demo.py::test_hello6 reason: reason - ======================== 6 xfailed in 0.11 seconds ========================= + ======================== 6 xfailed in 0.08 seconds ========================= .. 
_`evaluation of skipif/xfail conditions`: diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/talks.txt --- a/doc/talks.txt +++ b/doc/talks.txt @@ -23,8 +23,7 @@ Test parametrization: -- `generating parametrized tests with funcargs`_ (uses deprecated - ``addcall()`` API. +- `generating parametrized tests with funcargs`_ (uses deprecated ``addcall()`` API. - `test generators and cached setup`_ - `parametrizing tests, generalized`_ (blog post) - `putting test-hooks into local or global plugins`_ (blog post) diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/tmpdir.txt --- a/doc/tmpdir.txt +++ b/doc/tmpdir.txt @@ -28,7 +28,7 @@ $ py.test test_tmpdir.py =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 1 items test_tmpdir.py F @@ -36,7 +36,7 @@ ================================= FAILURES ================================= _____________________________ test_create_file _____________________________ - tmpdir = local('/Users/hpk/tmp/pytest-94/test_create_file0') + tmpdir = local('/Users/hpk/tmp/pytest-1596/test_create_file0') def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") @@ -47,7 +47,7 @@ E assert 0 test_tmpdir.py:7: AssertionError - ========================= 1 failed in 0.05 seconds ========================= + ========================= 1 failed in 0.20 seconds ========================= .. _`base temporary directory`: diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb doc/unittest.txt --- a/doc/unittest.txt +++ b/doc/unittest.txt @@ -24,7 +24,7 @@ $ py.test test_unittest.py =========================== test session starts ============================ - platform darwin -- Python 2.7.1 -- pytest-2.1.3 + platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 1 items test_unittest.py F @@ -42,7 +42,7 @@ test_unittest.py:8: AssertionError ----------------------------- Captured stdout ------------------------------ hello - ========================= 1 failed in 0.04 seconds ========================= + ========================= 1 failed in 0.23 seconds ========================= .. _`unittest.py style`: http://docs.python.org/library/unittest.html diff -r bc206c7d96286b517a5f8d4ff089b323c9bc6c59 -r ccd5794f5850e6a635c8348999a1ebd846e15abb setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.2.0.dev11', + version='2.2.0', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
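The doc/funcargs.txt pages regenerated in the commit above only show the runner output; the two example modules they exercise are not part of the diff. As a plausible reconstruction (the real doc sources may differ in detail), the 2.2-era funcarg factory and metafunc.parametrize APIs that produce exactly that output look like this::

    # content of test_simplefactory.py -- reconstructed, not taken from the diff
    def pytest_funcarg__myfuncarg(request):
        # funcarg factory: provides the "myfuncarg" argument to test functions
        return 42

    def test_function(myfuncarg):
        assert myfuncarg == 17   # fails, as shown in the regenerated output


    # content of test_example.py -- reconstructed, not taken from the diff
    def pytest_generate_tests(metafunc):
        # collection-time hook: generates ten invocations test_func[0]..test_func[9]
        if "numiter" in metafunc.funcargnames:
            metafunc.parametrize("numiter", range(10))

    def test_func(numiter):
        assert numiter < 9       # only the numiter=9 invocation fails
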
From commits-noreply at bitbucket.org Fri Nov 18 20:09:13 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 19:09:13 -0000 Subject: [py-svn] commit/pytest-xdist: 4 new changesets Message-ID: <20111118190913.2578.68348@bitbucket03.managed.contegix.com> 4 new commits in pytest-xdist: https://bitbucket.org/hpk42/pytest-xdist/changeset/6e46d056c733/ changeset: 6e46d056c733 user: hpk42 date: 2011-11-18 19:49:29 summary: Added tag 1.7 for changeset 20875fed94e7 affected #: 1 file diff -r 20875fed94e7f3dff50bdf762df91153b15ceca6 -r 6e46d056c7337c2c0dc33a74a8de8a8596256952 .hgtags --- a/.hgtags +++ b/.hgtags @@ -7,3 +7,4 @@ a423748bf17ee778a37853225210257699cad9c1 1.4 cd44a941c833c098e4899fe3d42a96703754d0d5 1.5 4815040bdad8f182a5487f57a9da385483836e75 1.6 +20875fed94e7f3dff50bdf762df91153b15ceca6 1.7 https://bitbucket.org/hpk42/pytest-xdist/changeset/29c38e195526/ changeset: 29c38e195526 user: hpk42 date: 2011-11-18 19:50:26 summary: depend on 2.2.0 affected #: 1 file diff -r 6e46d056c7337c2c0dc33a74a8de8a8596256952 -r 29c38e195526f5f0fdd651fb51f59d6efaaafbb0 setup.py --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ packages = ['xdist'], entry_points = {'pytest11': ['xdist = xdist.plugin'],}, zip_safe=False, - install_requires = ['execnet>=1.0.8', 'pytest>=2.2.0.dev2'], + install_requires = ['execnet>=1.0.8', 'pytest>=2.2.0'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', https://bitbucket.org/hpk42/pytest-xdist/changeset/c5e84b79ba05/ changeset: c5e84b79ba05 user: hpk42 date: 2011-11-18 20:07:56 summary: Added tag 1.7 for changeset 29c38e195526 affected #: 1 file diff -r 29c38e195526f5f0fdd651fb51f59d6efaaafbb0 -r c5e84b79ba05c446cd90e71fa3d4b5386be3aa6d .hgtags --- a/.hgtags +++ b/.hgtags @@ -8,3 +8,5 @@ cd44a941c833c098e4899fe3d42a96703754d0d5 1.5 4815040bdad8f182a5487f57a9da385483836e75 1.6 20875fed94e7f3dff50bdf762df91153b15ceca6 1.7 +20875fed94e7f3dff50bdf762df91153b15ceca6 1.7 +29c38e195526f5f0fdd651fb51f59d6efaaafbb0 1.7 https://bitbucket.org/hpk42/pytest-xdist/changeset/247b401a756a/ changeset: 247b401a756a user: hpk42 date: 2011-11-18 20:08:05 summary: tag 1.7 affected #: 2 files diff -r c5e84b79ba05c446cd90e71fa3d4b5386be3aa6d -r 247b401a756ad6a0c5f73206b8bb5f0182057783 setup.py --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ setup( name="pytest-xdist", - version='1.7.dev1', + version='1.7', description='py.test xdist plugin for distributed testing and loop-on-failing modes', long_description=open('README.txt').read(), license='GPLv2 or later', @@ -27,4 +27,4 @@ 'Programming Language :: Python', 'Programming Language :: Python :: 3', ], -) +) \ No newline at end of file diff -r c5e84b79ba05c446cd90e71fa3d4b5386be3aa6d -r 247b401a756ad6a0c5f73206b8bb5f0182057783 xdist/__init__.py --- a/xdist/__init__.py +++ b/xdist/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '1.7.dev1' +__version__ = '1.7' Repository URL: https://bitbucket.org/hpk42/pytest-xdist/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
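Besides pinning install_requires to the freshly released pytest 2.2.0, the xdist setup.py shown above also illustrates the general mechanism by which pytest discovers installed plugins: a setuptools entry point in the pytest11 group. A stripped-down sketch with hypothetical package and module names (not taken from the repository)::

    # setup.py for a hypothetical plugin package
    from setuptools import setup

    setup(
        name="pytest-example",                  # hypothetical distribution name
        version="0.1",
        py_modules=["pytest_example"],          # module implementing pytest hooks
        # pytest scans the 'pytest11' entry point group at startup and loads
        # every module registered there as a plugin
        entry_points={"pytest11": ["example = pytest_example"]},
        install_requires=["pytest>=2.2.0"],     # same kind of pin as in the commit above
    )
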
From commits-noreply at bitbucket.org Fri Nov 18 20:10:30 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 19:10:30 -0000 Subject: [py-svn] commit/pytest: 2 new changesets Message-ID: <20111118191030.17622.23512@bitbucket12.managed.contegix.com> 2 new commits in pytest: https://bitbucket.org/hpk42/pytest/changeset/152271036933/ changeset: 152271036933 user: hpk42 date: 2011-11-18 19:45:15 summary: small fix to release announcement affected #: 2 files diff -r ccd5794f5850e6a635c8348999a1ebd846e15abb -r 1522710369337d96bf9568569d5f0ca9b38a74e0 doc/Makefile --- a/doc/Makefile +++ b/doc/Makefile @@ -40,7 +40,7 @@ -rm -rf $(BUILDDIR)/* install: html - @rsync -avz _build/html/ pytest.org:/www/pytest.org/2.2.0.dev7 + @rsync -avz _build/html/ pytest.org:/www/pytest.org/2.2.0 installpdf: latexpdf @scp $(BUILDDIR)/latex/pytest.pdf pytest.org:/www/pytest.org/latest diff -r ccd5794f5850e6a635c8348999a1ebd846e15abb -r 1522710369337d96bf9568569d5f0ca9b38a74e0 doc/announce/release-2.2.0.txt --- a/doc/announce/release-2.2.0.txt +++ b/doc/announce/release-2.2.0.txt @@ -5,9 +5,9 @@ py.test testing tool. Plugins might need upgrades. It comes with these improvements: -* more powerful parametrization of tests: +* easier and more powerful parametrization of tests: - - new @pytest.mark.parametrize decorator for running test functions + - new @pytest.mark.parametrize decorator to run tests with different arguments - new metafunc.parametrize() API for parametrizing arguments independently - see examples at http://pytest.org/latest/example/parametrize.html - NOTE that parametrize() related APIs are still a bit experimental https://bitbucket.org/hpk42/pytest/changeset/cb50752789ad/ changeset: cb50752789ad user: hpk42 date: 2011-11-18 19:48:44 summary: Added tag 2.2.0 for changeset 152271036933 affected #: 1 file diff -r 1522710369337d96bf9568569d5f0ca9b38a74e0 -r cb50752789ad067d7eaa3ce86bf96ed5027a383b .hgtags --- a/.hgtags +++ b/.hgtags @@ -44,3 +44,4 @@ e5e1746a197f0398356a43fbe2eebac9690f795d 2.1.0 5864412c6f3c903384243bd315639d101d7ebc67 2.1.2 12a05d59249f80276e25fd8b96e8e545b1332b7a 2.1.3 +1522710369337d96bf9568569d5f0ca9b38a74e0 2.2.0 Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Fri Nov 18 20:16:09 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 19:16:09 -0000 Subject: [py-svn] commit/pytest: hpk42: fix typo in alfredo's name Message-ID: <20111118191609.19267.6795@bitbucket02.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/f74e56f745e4/ changeset: f74e56f745e4 user: hpk42 date: 2011-11-18 20:16:00 summary: fix typo in alfredo's name affected #: 1 file diff -r cb50752789ad067d7eaa3ce86bf96ed5027a383b -r f74e56f745e4e106b10ad66f395103d082b5f002 doc/announce/release-2.2.0.txt --- a/doc/announce/release-2.2.0.txt +++ b/doc/announce/release-2.2.0.txt @@ -39,8 +39,7 @@ pip install -U pytest # or easy_install -U pytest -Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, -Alfredo Doza and all who gave feedback or sent bug reports. +Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, Alfredo Deza and all who gave feedback or sent bug reports. best, holger krekel Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. 
You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Fri Nov 18 22:27:23 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 21:27:23 -0000 Subject: [py-svn] commit/pytest: hpk42: isolate test example run Message-ID: <20111118212723.11699.60263@bitbucket13.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/abf6012c985a/ changeset: abf6012c985a user: hpk42 date: 2011-11-18 22:26:38 summary: isolate test example run affected #: 2 files diff -r f74e56f745e4e106b10ad66f395103d082b5f002 -r abf6012c985a9aa3940072d0804a1235c051a9ee doc/Makefile --- a/doc/Makefile +++ b/doc/Makefile @@ -40,7 +40,7 @@ -rm -rf $(BUILDDIR)/* install: html - @rsync -avz _build/html/ pytest.org:/www/pytest.org/2.2.0 + -avz _build/html/ pytest.org:/www/pytest.org/latest installpdf: latexpdf @scp $(BUILDDIR)/latex/pytest.pdf pytest.org:/www/pytest.org/latest diff -r f74e56f745e4e106b10ad66f395103d082b5f002 -r abf6012c985a9aa3940072d0804a1235c051a9ee doc/example/markers.txt --- a/doc/example/markers.txt +++ b/doc/example/markers.txt @@ -25,26 +25,26 @@ You can then restrict a test run to only run tests marked with ``webtest``:: $ py.test -v -m webtest - =========================== test session starts ============================ + ============================= test session starts ============================== platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python collecting ... collected 2 items test_server.py:3: test_send_http PASSED - =================== 1 tests deselected by "-m 'webtest'" =================== - ================== 1 passed, 1 deselected in 0.01 seconds ================== + ===================== 1 tests deselected by "-m 'webtest'" ===================== + ==================== 1 passed, 1 deselected in 0.01 seconds ==================== Or the inverse, running all tests except the webtest ones:: $ py.test -v -m "not webtest" - =========================== test session starts ============================ + ============================= test session starts ============================== platform darwin -- Python 2.7.1 -- pytest-2.2.0 -- /Users/hpk/venv/1/bin/python collecting ... collected 2 items test_server.py:6: test_something_quick PASSED - ================= 1 tests deselected by "-m 'not webtest'" ================= - ================== 1 passed, 1 deselected in 0.01 seconds ================== + =================== 1 tests deselected by "-m 'not webtest'" =================== + ==================== 1 passed, 1 deselected in 0.02 seconds ==================== Registering markers ------------------------------------- @@ -140,45 +140,47 @@ You can use the ``-k`` command line option to only run tests with names that match the given argument:: $ py.test -k send_http # running with the above defined examples - =========================== test session starts ============================ + ============================= test session starts ============================== platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 4 items test_server.py . 
- =================== 3 tests deselected by '-ksend_http' ==================== - ================== 1 passed, 3 deselected in 0.02 seconds ================== + ===================== 3 tests deselected by '-ksend_http' ====================== + ==================== 1 passed, 3 deselected in 0.02 seconds ==================== And you can also run all tests except the ones that match the keyword:: $ py.test -k-send_http - =========================== test session starts ============================ + ============================= test session starts ============================== platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 4 items test_mark_classlevel.py .. test_server.py . - =================== 1 tests deselected by '-k-send_http' =================== - ================== 3 passed, 1 deselected in 0.03 seconds ================== + ===================== 1 tests deselected by '-k-send_http' ===================== + ==================== 3 passed, 1 deselected in 0.03 seconds ==================== Or to only select the class:: $ py.test -kTestClass - =========================== test session starts ============================ + ============================= test session starts ============================== platform darwin -- Python 2.7.1 -- pytest-2.2.0 collecting ... collected 4 items test_mark_classlevel.py .. - =================== 2 tests deselected by '-kTestClass' ==================== - ================== 2 passed, 2 deselected in 0.02 seconds ================== + ===================== 2 tests deselected by '-kTestClass' ====================== + ==================== 2 passed, 2 deselected in 0.02 seconds ==================== .. _`adding a custom marker from a plugin`: custom marker and command line option to control test runs ---------------------------------------------------------- +.. regendoc:wipe + Plugins can provide custom markers and implement specific behaviour based on it. This is a self-contained example which adds a command line option and a parametrized test function marker to run tests @@ -218,34 +220,28 @@ the test needs:: $ py.test -E stage2 - =========================== test session starts ============================ + ============================= test session starts ============================== platform darwin -- Python 2.7.1 -- pytest-2.2.0 - collecting ... collected 5 items + collecting ... collected 1 items - test_mark_classlevel.py .. - test_server.py .. test_someenv.py s - =================== 4 passed, 1 skipped in 0.04 seconds ==================== + ========================== 1 skipped in 0.02 seconds =========================== and here is one that specifies exactly the environment needed:: $ py.test -E stage1 - =========================== test session starts ============================ + ============================= test session starts ============================== platform darwin -- Python 2.7.1 -- pytest-2.2.0 - collecting ... collected 5 items + collecting ... collected 1 items - test_mark_classlevel.py .. - test_server.py .. test_someenv.py . - ========================= 5 passed in 0.04 seconds ========================= + =========================== 1 passed in 0.02 seconds =========================== The ``--markers`` option always gives you a list of available markers:: $ py.test --markers - @pytest.mark.webtest: mark a test as a webtest. - @pytest.mark.env(name): mark test to run only on named environment @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. 
Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Fri Nov 18 22:30:15 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 18 Nov 2011 21:30:15 -0000 Subject: [py-svn] commit/pytest: hpk42: fix makeinstall Message-ID: <20111118213015.25019.35017@bitbucket01.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/6331d279dc68/ changeset: 6331d279dc68 user: hpk42 date: 2011-11-18 22:28:14 summary: fix makeinstall affected #: 1 file diff -r abf6012c985a9aa3940072d0804a1235c051a9ee -r 6331d279dc682da32f70872ac541d3a3a2380c51 doc/Makefile --- a/doc/Makefile +++ b/doc/Makefile @@ -40,7 +40,7 @@ -rm -rf $(BUILDDIR)/* install: html - -avz _build/html/ pytest.org:/www/pytest.org/latest + rsync -avz _build/html/ pytest.org:/www/pytest.org/latest installpdf: latexpdf @scp $(BUILDDIR)/latex/pytest.pdf pytest.org:/www/pytest.org/latest Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Sun Nov 20 00:45:37 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Sat, 19 Nov 2011 23:45:37 -0000 Subject: [py-svn] commit/pytest: hpk42: improve parametrize() docs Message-ID: <20111119234537.32308.73227@bitbucket02.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/fa35a4fd9a6c/ changeset: fa35a4fd9a6c user: hpk42 date: 2011-11-20 00:45:05 summary: improve parametrize() docs affected #: 2 files diff -r 6331d279dc682da32f70872ac541d3a3a2380c51 -r fa35a4fd9a6cd55b3bf832a62b65e6e007c088ae _pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -588,22 +588,23 @@ self._ids = py.builtin.set() def parametrize(self, argnames, argvalues, indirect=False, ids=None): - """ parametrize calls to the underlying test function during - the collection phase of a test run. parametrize may be called - multiple times for disjunct argnames sets. + """ add new invocations to the underlying test function using the + list of argvalues for the given argnames. Parametrization is performed + during the collection phase. If you need to setup expensive resources + you may pass indirect=True and implement a funcarg factory which can + perform the expensive setup just before a test is actually run. :arg argnames: an argument name or a list of argument names - :arg argvalues: a list of values for a single argument if argnames - specified a single argument only or a list of tuples which specify - values for the multiple argument names. + :arg argvalues: a list of values for the argname or a list of tuples of + values for the list of argument names. :arg indirect: if True each argvalue corresponding to an argument will be - passed as request.param to the respective funcarg factory so that + passed as request.param to its respective funcarg factory so that it can perform more expensive setups during the setup phase of - a test rather than at collection time (which is the default). + a test rather than at collection time. 
- :arg ids: list of string ids corresponding to the (list of) argvalues + :arg ids: list of string ids each corresponding to the argvalues so that they are part of the test id. If no ids are provided they will be generated automatically from the argvalues. """ diff -r 6331d279dc682da32f70872ac541d3a3a2380c51 -r fa35a4fd9a6cd55b3bf832a62b65e6e007c088ae doc/funcargs.txt --- a/doc/funcargs.txt +++ b/doc/funcargs.txt @@ -240,5 +240,5 @@ ``metafunc.config``: access to command line opts and general config -.. automethod:: Metafunc.parametrize(name, values, idmaker=None) -.. automethod:: Metafunc.addcall(funcargs=None, id=_notexists, param=_notexists) +.. automethod:: Metafunc.parametrize +.. automethod:: Metafunc.addcall(funcargs=None,id=_notexists,param=_notexists) Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Sun Nov 20 21:56:25 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Sun, 20 Nov 2011 20:56:25 -0000 Subject: [py-svn] commit/py: hpk42: fix issue7 - raise a proper error if no valid statementrange can be found Message-ID: <20111120205625.14306.24299@bitbucket13.managed.contegix.com> 1 new commit in py: https://bitbucket.org/hpk42/py/changeset/c8e97602a6cb/ changeset: c8e97602a6cb user: hpk42 date: 2011-11-20 21:55:43 summary: fix issue7 - raise a proper error if no valid statementrange can be found affected #: 5 files diff -r 07ca410dedff1dceec578ad0a629e3bfd29869b7 -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,9 @@ +Changes between 1.4.5 and 1.4.x +================================================== + +- fix issue7: source.getstatementrange() now raises proper error + if no valid statement can be found + Changes between 1.4.4 and 1.4.5 ================================================== diff -r 07ca410dedff1dceec578ad0a629e3bfd29869b7 -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa py/__init__.py --- a/py/__init__.py +++ b/py/__init__.py @@ -8,7 +8,7 @@ (c) Holger Krekel and others, 2004-2010 """ -__version__ = '1.4.5' +__version__ = '1.4.6.dev1' from py import _apipkg diff -r 07ca410dedff1dceec578ad0a629e3bfd29869b7 -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa py/_code/source.py --- a/py/_code/source.py +++ b/py/_code/source.py @@ -108,6 +108,7 @@ def getstatementrange(self, lineno, assertion=False): """ return (start, end) tuple which spans the minimal statement region which containing the given lineno. + raise a ValueError if no such statementrange can be found. """ # XXX there must be a better than these heuristic ways ... # XXX there may even be better heuristics :-) @@ -116,6 +117,7 @@ # 1. 
find the start of the statement from codeop import compile_command + end = None for start in range(lineno, -1, -1): if assertion: line = self.lines[start] @@ -139,6 +141,8 @@ trysource = self[start:end] if trysource.isparseable(): return start, end + if end is None: + raise ValueError("no valid source range around line %d " % (lineno,)) return start, end def getblockend(self, lineno): diff -r 07ca410dedff1dceec578ad0a629e3bfd29869b7 -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa setup.py --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ name='py', description='library with cross-python path, ini-parsing, io, code, log facilities', long_description = open('README.txt').read(), - version='1.4.5', + version='1.4.6.dev1', url='http://pylib.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r 07ca410dedff1dceec578ad0a629e3bfd29869b7 -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa testing/code/test_source.py --- a/testing/code/test_source.py +++ b/testing/code/test_source.py @@ -236,6 +236,10 @@ r = source.getstatementrange(1) assert r == (1,2) + def test_getstatementrange_with_syntaxerror_issue7(self): + source = Source(":") + py.test.raises(ValueError, lambda: source.getstatementrange(0)) + @py.test.mark.skipif("sys.version_info < (2,6)") def test_compile_to_ast(self): import ast Repository URL: https://bitbucket.org/hpk42/py/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Fri Nov 25 22:34:19 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Fri, 25 Nov 2011 21:34:19 -0000 Subject: [py-svn] commit/pytest: hpk42: fix docstring for setup.py Message-ID: <20111125213419.31077.38092@bitbucket01.managed.contegix.com> 1 new commit in pytest: https://bitbucket.org/hpk42/pytest/changeset/c350e81eba00/ changeset: c350e81eba00 user: hpk42 date: 2011-11-25 22:34:05 summary: fix docstring for setup.py affected #: 1 file diff -r fa35a4fd9a6cd55b3bf832a62b65e6e007c088ae -r c350e81eba007b8037b0562b0a6f6fdd9fd9b05f setup.py --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ Platforms: Linux, Win32, OSX -Interpreters: Python versions 2.4 through to 3.2, Jython 2.5.1 and PyPy-1.5 +Interpreters: Python versions 2.4 through to 3.2, Jython 2.5.1 and PyPy-1.6/1.7 Bugs and issues: http://bitbucket.org/hpk42/pytest/issues/ @@ -70,4 +70,4 @@ return {'console_scripts': l} if __name__ == '__main__': - main() \ No newline at end of file + main() Repository URL: https://bitbucket.org/hpk42/pytest/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. From commits-noreply at bitbucket.org Sat Nov 26 23:42:05 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Sat, 26 Nov 2011 22:42:05 -0000 Subject: [py-svn] commit/py: hpk42: try to fix failures related to subversion 1.7. Message-ID: <20111126224205.12904.84406@bitbucket13.managed.contegix.com> 1 new commit in py: https://bitbucket.org/hpk42/py/changeset/5afa5d2a3bea/ changeset: 5afa5d2a3bea user: hpk42 date: 2011-11-24 11:21:51 summary: try to fix failures related to subversion 1.7. 
affected #: 6 files diff -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa -r 5afa5d2a3bea96b5741202a799aae44b8394802c CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -3,6 +3,9 @@ - fix issue7: source.getstatementrange() now raises proper error if no valid statement can be found +- fix issue8: fix code and tests of svnurl/svnwc to work on subversion 1.7 - + note that path.status(updates=1) will not properly work svn-17's status + --xml output is broken. Changes between 1.4.4 and 1.4.5 ================================================== diff -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa -r 5afa5d2a3bea96b5741202a799aae44b8394802c py/__init__.py --- a/py/__init__.py +++ b/py/__init__.py @@ -8,7 +8,7 @@ (c) Holger Krekel and others, 2004-2010 """ -__version__ = '1.4.6.dev1' +__version__ = '1.4.6.dev3' from py import _apipkg diff -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa -r 5afa5d2a3bea96b5741202a799aae44b8394802c py/_path/svnurl.py --- a/py/_path/svnurl.py +++ b/py/_path/svnurl.py @@ -233,6 +233,8 @@ e = sys.exc_info()[1] if e.err.find('non-existent in that revision') != -1: raise py.error.ENOENT(self, e.err) + elif e.err.find("E200009:") != -1: + raise py.error.ENOENT(self, e.err) elif e.err.find('File not found') != -1: raise py.error.ENOENT(self, e.err) elif e.err.find('not part of a repository')!=-1: diff -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa -r 5afa5d2a3bea96b5741202a799aae44b8394802c py/_path/svnwc.py --- a/py/_path/svnwc.py +++ b/py/_path/svnwc.py @@ -482,10 +482,13 @@ except py.process.cmdexec.Error: e = sys.exc_info()[1] strerr = e.err.lower() - if strerr.find('file not found') != -1: + if strerr.find('not found') != -1: + raise py.error.ENOENT(self) + elif strerr.find("E200009:") != -1: raise py.error.ENOENT(self) if (strerr.find('file exists') != -1 or strerr.find('file already exists') != -1 or + strerr.find('w150002:') != -1 or strerr.find("can't create directory") != -1): raise py.error.EEXIST(self) raise @@ -593,7 +596,7 @@ out = self._authsvn('lock').strip() if not out: # warning or error, raise exception - raise Exception(out[4:]) + raise ValueError("unknown error in svn lock command") def unlock(self): """ unset a previously set lock """ @@ -1066,6 +1069,8 @@ modrev = '?' author = '?' 
date = '' + elif itemstatus == "replaced": + pass else: #print entryel.toxml() commitel = entryel.getElementsByTagName('commit')[0] @@ -1148,7 +1153,11 @@ raise ValueError("Not a versioned resource") #raise ValueError, "Not a versioned resource %r" % path self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind'] - self.rev = int(d['revision']) + try: + self.rev = int(d['revision']) + except KeyError: + self.rev = None + self.path = py.path.local(d['path']) self.size = self.path.size() if 'lastchangedrev' in d: diff -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa -r 5afa5d2a3bea96b5741202a799aae44b8394802c setup.py --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ name='py', description='library with cross-python path, ini-parsing, io, code, log facilities', long_description = open('README.txt').read(), - version='1.4.6.dev1', + version='1.4.6.dev3', url='http://pylib.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa -r 5afa5d2a3bea96b5741202a799aae44b8394802c testing/path/test_svnwc.py --- a/testing/path/test_svnwc.py +++ b/testing/path/test_svnwc.py @@ -1,5 +1,6 @@ import py import os, sys +import pytest from py._path.svnwc import InfoSvnWCCommand, XMLWCStatus, parse_wcinfotime from py._path import svnwc as svncommon from svntestbase import CommonSvnTests @@ -105,6 +106,7 @@ assert r.join('sampledir/otherfile').basename in [item.basename for item in s.unchanged] + @pytest.mark.xfail(reason="svn-1.7 has buggy 'status --xml' output") def test_status_update(self, path1): r = path1 try: @@ -112,6 +114,7 @@ s = r.status(updates=1, rec=1) # Comparing just the file names, because paths are unpredictable # on Windows. (long vs. 8.3 paths) + py.std.pprint.pprint(s.allpath()) assert r.join('anotherfile').basename in [item.basename for item in s.update_available] #assert len(s.update_available) == 1 @@ -122,7 +125,6 @@ p = path1.join("samplefile") p.remove() p.ensure(dir=0) - p.add() try: s = path1.status() assert p.basename in [item.basename for item in s.replaced] @@ -164,8 +166,6 @@ otherrepo, otherrepourl, otherwc = repowc2 d = path1.ensure('sampledir', dir=1) try: - d.remove() - d.add() d.update() d.propset('svn:externals', 'otherwc %s' % (otherwc.url,)) d.update() @@ -181,7 +181,7 @@ def test_status_deleted(self, path1): d = path1.ensure('sampledir', dir=1) d.remove() - d.add() + d.ensure(dir=1) path1.commit() d.ensure('deletefile', dir=0) d.commit() @@ -338,7 +338,7 @@ somefile = root.join('somefile') somefile.ensure(file=True) # not yet added to repo - py.test.raises(py.process.cmdexec.Error, 'somefile.lock()') + py.test.raises((py.process.cmdexec.Error, ValueError), 'somefile.lock()') somefile.write('foo') somefile.commit('test') assert somefile.check(versioned=True) Repository URL: https://bitbucket.org/hpk42/py/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email. 
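The commit above mainly extends the stderr matching in svnurl/svnwc so that the new Subversion 1.7 error codes (E200009, W150002) map onto the same py.error exceptions as before. Condensed into a standalone sketch (this is not the pylib code itself, just the translation pattern it uses)::

    import py

    def translate_svn_error(stderr, path):
        # map svn client stderr text onto py.error exceptions so that callers can
        # handle missing/existing paths uniformly across svn 1.6 and 1.7
        msg = stderr.lower()
        if "not found" in msg or "e200009:" in msg:
            raise py.error.ENOENT(path)
        if ("file exists" in msg or "file already exists" in msg
                or "w150002:" in msg or "can't create directory" in msg):
            raise py.error.EEXIST(path)
        # anything unrecognized is surfaced unchanged for the caller to inspect
        raise RuntimeError("unexpected svn error: %s" % stderr.strip())
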
From commits-noreply at bitbucket.org Wed Nov 30 18:24:07 2011 From: commits-noreply at bitbucket.org (Bitbucket) Date: Wed, 30 Nov 2011 17:24:07 -0000 Subject: [py-svn] commit/py: 8 new changesets Message-ID: <20111130172407.1573.73206@bitbucket03.managed.contegix.com> 8 new commits in py: https://bitbucket.org/hpk42/py/changeset/1d0f65a07a1c/ changeset: 1d0f65a07a1c user: RonnyPfannschmidt date: 2011-11-23 08:40:27 summary: make traceback recursion detection more resilent about the eval magic of a decorator lib affected #: 2 files diff -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa -r 1d0f65a07a1c1673d45fbe984dadf74a1780f83b CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -3,6 +3,8 @@ - fix issue7: source.getstatementrange() now raises proper error if no valid statement can be found +- make trackeback recursion detection more resilent + about the eval magic of a decorator library Changes between 1.4.4 and 1.4.5 ================================================== diff -r c8e97602a6cb3a2b3332f1bb3f76a857bfa7c5fa -r 1d0f65a07a1c1673d45fbe984dadf74a1780f83b py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -283,7 +283,11 @@ """ cache = {} for i, entry in enumerate(self): - key = entry.frame.code.path, entry.lineno + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + #XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno #print "checking for recursion at", key l = cache.setdefault(key, []) if l: https://bitbucket.org/hpk42/py/changeset/214561f55dfc/ changeset: 214561f55dfc user: RonnyPfannschmidt date: 2011-11-23 08:42:25 summary: add py.builtin.next affected #: 4 files diff -r 1d0f65a07a1c1673d45fbe984dadf74a1780f83b -r 214561f55dfcc96014618ae1cf5d83ec6e9619a7 CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -5,6 +5,7 @@ if no valid statement can be found - make trackeback recursion detection more resilent about the eval magic of a decorator library +- add py.builtin.next Changes between 1.4.4 and 1.4.5 ================================================== diff -r 1d0f65a07a1c1673d45fbe984dadf74a1780f83b -r 214561f55dfcc96014618ae1cf5d83ec6e9619a7 py/__init__.py --- a/py/__init__.py +++ b/py/__init__.py @@ -104,6 +104,7 @@ 'builtins' : '._builtin:builtins', 'execfile' : '._builtin:execfile', 'callable' : '._builtin:callable', + 'next' : '._builtin:next', }, # input-output helping diff -r 1d0f65a07a1c1673d45fbe984dadf74a1780f83b -r 214561f55dfcc96014618ae1cf5d83ec6e9619a7 py/_builtin.py --- a/py/_builtin.py +++ b/py/_builtin.py @@ -91,6 +91,22 @@ enumerate = enumerate try: + next = next +except NameError: + _next_noarg = object() + def next(it, default=_next_noarg): + try: + if hasattr(it, '__next__'): + return it.__next__() + else: + return it.next() + except StopIteration: + if default is _next_noarg: + raise + else: + return default + +try: BaseException = BaseException except NameError: BaseException = Exception diff -r 1d0f65a07a1c1673d45fbe984dadf74a1780f83b -r 214561f55dfcc96014618ae1cf5d83ec6e9619a7 testing/root/test_builtin.py --- a/testing/root/test_builtin.py +++ b/testing/root/test_builtin.py @@ -1,7 +1,7 @@ import sys import types import py -from py.builtin import set, frozenset, reversed, sorted +from py.builtin import set, frozenset, reversed, sorted, next def test_enumerate(): l = [0,1,2] @@ -160,3 +160,22 @@ code = py.builtin._getcode(test_getcode) assert isinstance(code, types.CodeType) assert py.builtin._getcode(4) is 
None + +def test_next(): + it = iter([]) + py.test.raises(StopIteraton, next, it) + it = iter('1') + n = next(it) + assert n == '1' + py.test.raises(StopIteraton, next, it) + + class new_next(object): + def __next__(self): + return 1 + assert next(new_next()) == 1 + + class old_next(object): + def next(self): + return 1 + assert next(old_next) == 1 + https://bitbucket.org/hpk42/py/changeset/7671e4e54b2e/ changeset: 7671e4e54b2e user: RonnyPfannschmidt date: 2011-11-23 09:56:59 summary: make source.getstatementrange() resilent against non-python like jinja2 affected #: 3 files diff -r 214561f55dfcc96014618ae1cf5d83ec6e9619a7 -r 7671e4e54b2e8107679b6c343c8fcf22b843668a CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -3,6 +3,8 @@ - fix issue7: source.getstatementrange() now raises proper error if no valid statement can be found +- make source.getstatementrange() more resilent about non-python code frames + (as seen from jnja2) - make trackeback recursion detection more resilent about the eval magic of a decorator library - add py.builtin.next diff -r 214561f55dfcc96014618ae1cf5d83ec6e9619a7 -r 7671e4e54b2e8107679b6c343c8fcf22b843668a py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -171,6 +171,8 @@ _, end = source.getstatementrange(end) except IndexError: end = self.lineno + 1 + except ValueError: + pass # heuristic to stop displaying source on e.g. # if something: # assume this causes a NameError # # _this_ lines and the one diff -r 214561f55dfcc96014618ae1cf5d83ec6e9619a7 -r 7671e4e54b2e8107679b6c343c8fcf22b843668a testing/code/test_excinfo.py --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -238,6 +238,20 @@ else: assert s == " File '':1 in \n ???\n" +def test_excinfo_no_python_sourcecode(tmpdir): + tmpdir.join('test.txt').write("{{ h()}}:") + + jinja2 = py.test.importorskip('jinja2') + loader = jinja2.FileSystemLoader(str(tmpdir)) + env = jinja2.Environment(loader=loader) + template = env.get_template('test.txt') + excinfo = py.test.raises(ValueError, + template.render, h=h) + for item in excinfo.traceback: + print(item) #XXX: for some reason jinja.Template.render is printed in full + item.source # shouldnt fail + + def test_entrysource_Queue_example(): try: queue.Queue().get(timeout=0.001) https://bitbucket.org/hpk42/py/changeset/0121003c2f17/ changeset: 0121003c2f17 user: RonnyPfannschmidt date: 2011-11-23 15:11:06 summary: add test for the decorator recursion missdetection and a testenv that depends on the external tools affected #: 2 files diff -r 7671e4e54b2e8107679b6c343c8fcf22b843668a -r 0121003c2f17b104015309771cb18d7cd5f7927d testing/code/test_excinfo.py --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -154,6 +154,25 @@ recindex = traceback.recursionindex() assert recindex is None + def test_traceback_messy_recursion(self): + #XXX: simplified locally testable version + decorator = py.test.importorskip('decorator').decorator + + def log(f, *k, **kw): + print('%s %s' % (k, kw)) + f(*k, **kw) + log = decorator(log) + + def fail(): + raise ValueError('') + + fail = log(log(fail)) + + excinfo = py.test.raises(ValueError, fail) + assert excinfo.traceback.recursionindex() is None + + + def test_traceback_getcrashentry(self): def i(): __tracebackhide__ = True @@ -239,6 +258,7 @@ assert s == " File '':1 in \n ???\n" def test_excinfo_no_python_sourcecode(tmpdir): + #XXX: simplified locally testable version tmpdir.join('test.txt').write("{{ h()}}:") jinja2 = py.test.importorskip('jinja2') diff -r 
7671e4e54b2e8107679b6c343c8fcf22b843668a -r 0121003c2f17b104015309771cb18d7cd5f7927d tox.ini --- a/tox.ini +++ b/tox.ini @@ -22,6 +22,14 @@ commands= {envpython} -m pytest --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}0.xml [io_ code] +[testenv:external] +deps= + pytest + jinja2 + decorator +commands= + py.test --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}.xml testing/code + [pytest] rsyncdirs = conftest.py py doc testing addopts = -rxXf https://bitbucket.org/hpk42/py/changeset/be8202d2aa35/ changeset: be8202d2aa35 user: RonnyPfannschmidt date: 2011-11-23 15:14:06 summary: fix argument path in tox.ini affected #: 1 file diff -r 0121003c2f17b104015309771cb18d7cd5f7927d -r be8202d2aa3502f240e7308c7d2c15726908f072 tox.ini --- a/tox.ini +++ b/tox.ini @@ -28,7 +28,7 @@ jinja2 decorator commands= - py.test --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}.xml testing/code + py.test --confcutdir=.. -rfsxX --junitxml={envlogdir}/junit-{envname}.xml {posargs:code} [pytest] rsyncdirs = conftest.py py doc testing https://bitbucket.org/hpk42/py/changeset/2ffc28a9cb69/ changeset: 2ffc28a9cb69 user: RonnyPfannschmidt date: 2011-11-29 11:28:29 summary: ensure tracebackitem.source returns the actual line if it cant find a statement range affected #: 2 files diff -r be8202d2aa3502f240e7308c7d2c15726908f072 -r 2ffc28a9cb69b2ee923e1021bad577c8f8dc26db py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -169,10 +169,8 @@ end = self.lineno try: _, end = source.getstatementrange(end) - except IndexError: + except (IndexError, ValueError): end = self.lineno + 1 - except ValueError: - pass # heuristic to stop displaying source on e.g. # if something: # assume this causes a NameError # # _this_ lines and the one diff -r be8202d2aa3502f240e7308c7d2c15726908f072 -r 2ffc28a9cb69b2ee923e1021bad577c8f8dc26db testing/code/test_excinfo.py --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -270,6 +270,8 @@ for item in excinfo.traceback: print(item) #XXX: for some reason jinja.Template.render is printed in full item.source # shouldnt fail + if item.path.basename == 'test.txt': + assert str(item.source) == '{{ h()}}:' def test_entrysource_Queue_example(): https://bitbucket.org/hpk42/py/changeset/1166a7c0d608/ changeset: 1166a7c0d608 user: RonnyPfannschmidt date: 2011-11-30 18:18:14 summary: add support for ; in iniconfig as comment starter affected #: 3 files diff -r 2ffc28a9cb69b2ee923e1021bad577c8f8dc26db -r 1166a7c0d6080fe4ca0c0bd536fd7690a571e233 CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -8,6 +8,7 @@ - make trackeback recursion detection more resilent about the eval magic of a decorator library - add py.builtin.next +- iniconfig: add support for ; as comment starter Changes between 1.4.4 and 1.4.5 ================================================== diff -r 2ffc28a9cb69b2ee923e1021bad577c8f8dc26db -r 1166a7c0d6080fe4ca0c0bd536fd7690a571e233 py/_iniconfig.py --- a/py/_iniconfig.py +++ b/py/_iniconfig.py @@ -103,6 +103,7 @@ def _parseline(self, line, lineno): # comments line = line.split('#')[0].rstrip() + line = line.split(';')[0].rstrip() # blank lines if not line: return None, None diff -r 2ffc28a9cb69b2ee923e1021bad577c8f8dc26db -r 1166a7c0d6080fe4ca0c0bd536fd7690a571e233 testing/test_iniconfig.py --- a/testing/test_iniconfig.py +++ b/testing/test_iniconfig.py @@ -65,6 +65,19 @@ '[section] #comment', [(0, 'section', None, None)] ), + 'comment2': ( + '; comment', + [] + ), + 'comment2 on value': ( + 'value = 1 ; comment', + [(0, None, 
'value', '1')] + ), + + 'comment2 on section': ( + '[section] ;comment', + [(0, 'section', None, None)] + ), 'pseudo section syntax in value': ( 'name = value []', [(0, None, 'name', 'value []')] https://bitbucket.org/hpk42/py/changeset/bb527bc1a414/ changeset: bb527bc1a414 user: RonnyPfannschmidt date: 2011-11-30 18:21:05 summary: merge upstream affected #: 6 files diff -r 1166a7c0d6080fe4ca0c0bd536fd7690a571e233 -r bb527bc1a4145d2531a0fb879aad610c04333605 CHANGELOG --- a/CHANGELOG +++ b/CHANGELOG @@ -3,6 +3,9 @@ - fix issue7: source.getstatementrange() now raises proper error if no valid statement can be found +- fix issue8: fix code and tests of svnurl/svnwc to work on subversion 1.7 - + note that path.status(updates=1) will not properly work svn-17's status + --xml output is broken. - make source.getstatementrange() more resilent about non-python code frames (as seen from jnja2) - make trackeback recursion detection more resilent diff -r 1166a7c0d6080fe4ca0c0bd536fd7690a571e233 -r bb527bc1a4145d2531a0fb879aad610c04333605 py/__init__.py --- a/py/__init__.py +++ b/py/__init__.py @@ -8,7 +8,7 @@ (c) Holger Krekel and others, 2004-2010 """ -__version__ = '1.4.6.dev1' +__version__ = '1.4.6.dev3' from py import _apipkg diff -r 1166a7c0d6080fe4ca0c0bd536fd7690a571e233 -r bb527bc1a4145d2531a0fb879aad610c04333605 py/_path/svnurl.py --- a/py/_path/svnurl.py +++ b/py/_path/svnurl.py @@ -233,6 +233,8 @@ e = sys.exc_info()[1] if e.err.find('non-existent in that revision') != -1: raise py.error.ENOENT(self, e.err) + elif e.err.find("E200009:") != -1: + raise py.error.ENOENT(self, e.err) elif e.err.find('File not found') != -1: raise py.error.ENOENT(self, e.err) elif e.err.find('not part of a repository')!=-1: diff -r 1166a7c0d6080fe4ca0c0bd536fd7690a571e233 -r bb527bc1a4145d2531a0fb879aad610c04333605 py/_path/svnwc.py --- a/py/_path/svnwc.py +++ b/py/_path/svnwc.py @@ -482,10 +482,13 @@ except py.process.cmdexec.Error: e = sys.exc_info()[1] strerr = e.err.lower() - if strerr.find('file not found') != -1: + if strerr.find('not found') != -1: + raise py.error.ENOENT(self) + elif strerr.find("E200009:") != -1: raise py.error.ENOENT(self) if (strerr.find('file exists') != -1 or strerr.find('file already exists') != -1 or + strerr.find('w150002:') != -1 or strerr.find("can't create directory") != -1): raise py.error.EEXIST(self) raise @@ -593,7 +596,7 @@ out = self._authsvn('lock').strip() if not out: # warning or error, raise exception - raise Exception(out[4:]) + raise ValueError("unknown error in svn lock command") def unlock(self): """ unset a previously set lock """ @@ -1066,6 +1069,8 @@ modrev = '?' author = '?' 
date = '' + elif itemstatus == "replaced": + pass else: #print entryel.toxml() commitel = entryel.getElementsByTagName('commit')[0] @@ -1148,7 +1153,11 @@ raise ValueError("Not a versioned resource") #raise ValueError, "Not a versioned resource %r" % path self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind'] - self.rev = int(d['revision']) + try: + self.rev = int(d['revision']) + except KeyError: + self.rev = None + self.path = py.path.local(d['path']) self.size = self.path.size() if 'lastchangedrev' in d: diff -r 1166a7c0d6080fe4ca0c0bd536fd7690a571e233 -r bb527bc1a4145d2531a0fb879aad610c04333605 setup.py --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ name='py', description='library with cross-python path, ini-parsing, io, code, log facilities', long_description = open('README.txt').read(), - version='1.4.6.dev1', + version='1.4.6.dev3', url='http://pylib.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff -r 1166a7c0d6080fe4ca0c0bd536fd7690a571e233 -r bb527bc1a4145d2531a0fb879aad610c04333605 testing/path/test_svnwc.py --- a/testing/path/test_svnwc.py +++ b/testing/path/test_svnwc.py @@ -1,5 +1,6 @@ import py import os, sys +import pytest from py._path.svnwc import InfoSvnWCCommand, XMLWCStatus, parse_wcinfotime from py._path import svnwc as svncommon from svntestbase import CommonSvnTests @@ -105,6 +106,7 @@ assert r.join('sampledir/otherfile').basename in [item.basename for item in s.unchanged] + @pytest.mark.xfail(reason="svn-1.7 has buggy 'status --xml' output") def test_status_update(self, path1): r = path1 try: @@ -112,6 +114,7 @@ s = r.status(updates=1, rec=1) # Comparing just the file names, because paths are unpredictable # on Windows. (long vs. 8.3 paths) + py.std.pprint.pprint(s.allpath()) assert r.join('anotherfile').basename in [item.basename for item in s.update_available] #assert len(s.update_available) == 1 @@ -122,7 +125,6 @@ p = path1.join("samplefile") p.remove() p.ensure(dir=0) - p.add() try: s = path1.status() assert p.basename in [item.basename for item in s.replaced] @@ -164,8 +166,6 @@ otherrepo, otherrepourl, otherwc = repowc2 d = path1.ensure('sampledir', dir=1) try: - d.remove() - d.add() d.update() d.propset('svn:externals', 'otherwc %s' % (otherwc.url,)) d.update() @@ -181,7 +181,7 @@ def test_status_deleted(self, path1): d = path1.ensure('sampledir', dir=1) d.remove() - d.add() + d.ensure(dir=1) path1.commit() d.ensure('deletefile', dir=0) d.commit() @@ -338,7 +338,7 @@ somefile = root.join('somefile') somefile.ensure(file=True) # not yet added to repo - py.test.raises(py.process.cmdexec.Error, 'somefile.lock()') + py.test.raises((py.process.cmdexec.Error, ValueError), 'somefile.lock()') somefile.write('foo') somefile.commit('test') assert somefile.check(versioned=True) Repository URL: https://bitbucket.org/hpk42/py/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.
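Among the changesets above, py.builtin.next is a small 2.x/3.x compatibility helper: it dispatches to __next__() or next() as appropriate and accepts an optional default, like the builtin next() of Python 2.6+. A short usage sketch, assuming a py version that already contains this change::

    import py

    it = iter(["a"])
    assert py.builtin.next(it) == "a"                     # first item
    assert py.builtin.next(it, "fallback") == "fallback"  # exhausted -> default

    # without a default, exhaustion propagates StopIteration like builtin next()
    try:
        py.builtin.next(iter([]))
    except StopIteration:
        pass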