[Pytest-commit] commit/pytest: hpk42: Merged in hpk42/pytest-patches/testrefactor (pull request #284)
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Wed Apr 29 16:32:35 CEST 2015
1 new commit in pytest:
https://bitbucket.org/pytest-dev/pytest/commits/7d4a0b78d19b/
Changeset: 7d4a0b78d19b
User: hpk42
Date: 2015-04-29 14:32:28+00:00
Summary: Merged in hpk42/pytest-patches/testrefactor (pull request #284)
majorly refactor pytester and speed/streamline tests
Affected #: 25 files
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -43,6 +43,14 @@
implementations. Use the ``hookwrapper`` mechanism instead already
introduced with pytest-2.7.
+- speed up pytest's own test suite considerably by using inprocess
+ tests by default (testrun can be modified with --runpytest=subprocess
+ to create subprocesses in many places instead). The main
+  APIs to run pytest in a test are "runpytest()" or "runpytest_subprocess"
+  and "runpytest_inprocess" if you need a particular way of running
+ the test. In all cases you get back a RunResult but the inprocess
+ one will also have a "reprec" attribute with the recorded events/reports.
+
2.7.1.dev (compared to 2.7.0)
-----------------------------
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c _pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -29,17 +29,24 @@
initialization.
"""
try:
- config = _prepareconfig(args, plugins)
- except ConftestImportFailure:
- e = sys.exc_info()[1]
- tw = py.io.TerminalWriter(sys.stderr)
- for line in traceback.format_exception(*e.excinfo):
- tw.line(line.rstrip(), red=True)
- tw.line("ERROR: could not load %s\n" % (e.path), red=True)
+ try:
+ config = _prepareconfig(args, plugins)
+ except ConftestImportFailure as e:
+ tw = py.io.TerminalWriter(sys.stderr)
+ for line in traceback.format_exception(*e.excinfo):
+ tw.line(line.rstrip(), red=True)
+ tw.line("ERROR: could not load %s\n" % (e.path), red=True)
+ return 4
+ else:
+ try:
+ config.pluginmanager.check_pending()
+ return config.hook.pytest_cmdline_main(config=config)
+ finally:
+ config._ensure_unconfigure()
+ except UsageError as e:
+ for msg in e.args:
+ sys.stderr.write("ERROR: %s\n" %(msg,))
return 4
- else:
- config.pluginmanager.check_pending()
- return config.hook.pytest_cmdline_main(config=config)
class cmdline: # compatibility namespace
main = staticmethod(main)
@@ -81,12 +88,18 @@
if not isinstance(args, str):
raise ValueError("not a string or argument list: %r" % (args,))
args = shlex.split(args)
- pluginmanager = get_config().pluginmanager
- if plugins:
- for plugin in plugins:
- pluginmanager.register(plugin)
- return pluginmanager.hook.pytest_cmdline_parse(
- pluginmanager=pluginmanager, args=args)
+ config = get_config()
+ pluginmanager = config.pluginmanager
+ try:
+ if plugins:
+ for plugin in plugins:
+ pluginmanager.register(plugin)
+ return pluginmanager.hook.pytest_cmdline_parse(
+ pluginmanager=pluginmanager, args=args)
+ except BaseException:
+ config._ensure_unconfigure()
+ raise
+
def exclude_pytest_names(name):
return not name.startswith(name) or name == "pytest_plugins" or \
@@ -259,7 +272,10 @@
def consider_pluginarg(self, arg):
if arg.startswith("no:"):
- self.set_blocked(arg[3:])
+ name = arg[3:]
+ self.set_blocked(name)
+ if not name.startswith("pytest_"):
+ self.set_blocked("pytest_" + name)
else:
self.import_plugin(arg)
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c _pytest/main.py
--- a/_pytest/main.py
+++ b/_pytest/main.py
@@ -83,10 +83,7 @@
initstate = 2
doit(config, session)
except pytest.UsageError:
- args = sys.exc_info()[1].args
- for msg in args:
- sys.stderr.write("ERROR: %s\n" %(msg,))
- session.exitstatus = EXIT_USAGEERROR
+ raise
except KeyboardInterrupt:
excinfo = py.code.ExceptionInfo()
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c _pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -1,5 +1,7 @@
""" (disabled by default) support for testing pytest and pytest plugins. """
+import gc
import sys
+import traceback
import os
import codecs
import re
@@ -15,6 +17,136 @@
from _pytest.main import Session, EXIT_OK
+
+def pytest_addoption(parser):
+ # group = parser.getgroup("pytester", "pytester (self-tests) options")
+ parser.addoption('--lsof',
+ action="store_true", dest="lsof", default=False,
+ help=("run FD checks if lsof is available"))
+
+ parser.addoption('--runpytest', default="inprocess", dest="runpytest",
+ choices=("inprocess", "subprocess", ),
+ help=("run pytest sub runs in tests using an 'inprocess' "
+ "or 'subprocess' (python -m main) method"))
+
+
+def pytest_configure(config):
+ # This might be called multiple times. Only take the first.
+ global _pytest_fullpath
+ try:
+ _pytest_fullpath
+ except NameError:
+ _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+ _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
+
+ if config.getvalue("lsof"):
+ checker = LsofFdLeakChecker()
+ if checker.matching_platform():
+ config.pluginmanager.register(checker)
+
+
+class LsofFdLeakChecker(object):
+ def get_open_files(self):
+ out = self._exec_lsof()
+ open_files = self._parse_lsof_output(out)
+ return open_files
+
+ def _exec_lsof(self):
+ pid = os.getpid()
+ return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
+
+ def _parse_lsof_output(self, out):
+ def isopen(line):
+ return line.startswith('f') and ("deleted" not in line and
+ 'mem' not in line and "txt" not in line and 'cwd' not in line)
+
+ open_files = []
+
+ for line in out.split("\n"):
+ if isopen(line):
+ fields = line.split('\0')
+ fd = fields[0][1:]
+ filename = fields[1][1:]
+ if filename.startswith('/'):
+ open_files.append((fd, filename))
+
+ return open_files
+
+ def matching_platform(self):
+ try:
+ py.process.cmdexec("lsof -v")
+ except py.process.cmdexec.Error:
+ return False
+ else:
+ return True
+
+ @pytest.hookimpl_opts(hookwrapper=True, tryfirst=True)
+ def pytest_runtest_item(self, item):
+ lines1 = self.get_open_files()
+ yield
+ if hasattr(sys, "pypy_version_info"):
+ gc.collect()
+ lines2 = self.get_open_files()
+
+ new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
+ leaked_files = [t for t in lines2 if t[0] in new_fds]
+ if leaked_files:
+ error = []
+ error.append("***** %s FD leakage detected" % len(leaked_files))
+ error.extend([str(f) for f in leaked_files])
+ error.append("*** Before:")
+ error.extend([str(f) for f in lines1])
+ error.append("*** After:")
+ error.extend([str(f) for f in lines2])
+ error.append(error[0])
+ error.append("*** function %s:%s: %s " % item.location)
+ pytest.fail("\n".join(error), pytrace=False)
+
+
+# XXX copied from execnet's conftest.py - needs to be merged
+winpymap = {
+ 'python2.7': r'C:\Python27\python.exe',
+ 'python2.6': r'C:\Python26\python.exe',
+ 'python3.1': r'C:\Python31\python.exe',
+ 'python3.2': r'C:\Python32\python.exe',
+ 'python3.3': r'C:\Python33\python.exe',
+ 'python3.4': r'C:\Python34\python.exe',
+ 'python3.5': r'C:\Python35\python.exe',
+}
+
+def getexecutable(name, cache={}):
+ try:
+ return cache[name]
+ except KeyError:
+ executable = py.path.local.sysfind(name)
+ if executable:
+ if name == "jython":
+ import subprocess
+ popen = subprocess.Popen([str(executable), "--version"],
+ universal_newlines=True, stderr=subprocess.PIPE)
+ out, err = popen.communicate()
+ if not err or "2.5" not in err:
+ executable = None
+ if "2.5.2" in err:
+ executable = None # http://bugs.jython.org/issue1790
+ cache[name] = executable
+ return executable
+
+@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
+ 'pypy', 'pypy3'])
+def anypython(request):
+ name = request.param
+ executable = getexecutable(name)
+ if executable is None:
+ if sys.platform == "win32":
+ executable = winpymap.get(name, None)
+ if executable:
+ executable = py.path.local(executable)
+ if executable.check():
+ return executable
+ pytest.skip("no suitable %s found" % (name,))
+ return executable
+
# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
@@ -39,23 +171,6 @@
return [x for x in l if x[0] != "_"]
-def pytest_addoption(parser):
- group = parser.getgroup("pylib")
- group.addoption('--no-tools-on-path',
- action="store_true", dest="notoolsonpath", default=False,
- help=("discover tools on PATH instead of going through py.cmdline.")
- )
-
-def pytest_configure(config):
- # This might be called multiple times. Only take the first.
- global _pytest_fullpath
- try:
- _pytest_fullpath
- except NameError:
- _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
- _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
-
-
class ParsedCall:
def __init__(self, name, kwargs):
self.__dict__.update(kwargs)
@@ -201,9 +316,11 @@
return LineMatcher
def pytest_funcarg__testdir(request):
- tmptestdir = TmpTestdir(request)
+ tmptestdir = Testdir(request)
return tmptestdir
+
+
rex_outcome = re.compile("(\d+) (\w+)")
class RunResult:
"""The result of running a command.
@@ -213,10 +330,10 @@
:ret: The return value.
:outlines: List of lines captured from stdout.
:errlines: List of lines captures from stderr.
- :stdout: LineMatcher of stdout, use ``stdout.str()`` to
+ :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
reconstruct stdout or the commonly used
``stdout.fnmatch_lines()`` method.
- :stderrr: LineMatcher of stderr.
+       :stderr: :py:class:`LineMatcher` of stderr.
:duration: Duration in seconds.
"""
@@ -229,6 +346,8 @@
self.duration = duration
def parseoutcomes(self):
+ """ Return a dictionary of outcomestring->num from parsing
+ the terminal output that the test process produced."""
for line in reversed(self.outlines):
if 'seconds' in line:
outcomes = rex_outcome.findall(line)
@@ -238,14 +357,17 @@
d[cat] = int(num)
return d
- def assertoutcome(self, passed=0, skipped=0, failed=0):
+ def assert_outcomes(self, passed=0, skipped=0, failed=0):
+ """ assert that the specified outcomes appear with the respective
+ numbers (0 means it didn't occur) in the text output from a test run."""
d = self.parseoutcomes()
assert passed == d.get("passed", 0)
assert skipped == d.get("skipped", 0)
assert failed == d.get("failed", 0)
-class TmpTestdir:
+
+class Testdir:
"""Temporary test directory with tools to test/run py.test itself.
This is based on the ``tmpdir`` fixture but provides a number of
@@ -268,7 +390,6 @@
def __init__(self, request):
self.request = request
- self.Config = request.config.__class__
# XXX remove duplication with tmpdir plugin
basetmp = request.config._tmpdirhandler.ensuretemp("testdir")
name = request.function.__name__
@@ -280,12 +401,18 @@
break
self.tmpdir = tmpdir
self.plugins = []
- self._savesyspath = list(sys.path)
+ self._savesyspath = (list(sys.path), list(sys.meta_path))
+ self._savemodulekeys = set(sys.modules)
self.chdir() # always chdir
self.request.addfinalizer(self.finalize)
+ method = self.request.config.getoption("--runpytest")
+ if method == "inprocess":
+ self._runpytest_method = self.runpytest_inprocess
+ elif method == "subprocess":
+ self._runpytest_method = self.runpytest_subprocess
def __repr__(self):
- return "<TmpTestdir %r>" % (self.tmpdir,)
+ return "<Testdir %r>" % (self.tmpdir,)
def finalize(self):
"""Clean up global state artifacts.
@@ -296,23 +423,22 @@
has finished.
"""
- sys.path[:] = self._savesyspath
+ sys.path[:], sys.meta_path[:] = self._savesyspath
if hasattr(self, '_olddir'):
self._olddir.chdir()
self.delete_loaded_modules()
def delete_loaded_modules(self):
- """Delete modules that have been loaded from tmpdir.
+ """Delete modules that have been loaded during a test.
This allows the interpreter to catch module changes in case
the module is re-imported.
-
"""
- for name, mod in list(sys.modules.items()):
- if mod:
- fn = getattr(mod, '__file__', None)
- if fn and fn.startswith(str(self.tmpdir)):
- del sys.modules[name]
+ for name in set(sys.modules).difference(self._savemodulekeys):
+ # it seems zope.interfaces is keeping some state
+ # (used by twisted related tests)
+ if name != "zope.interface":
+ del sys.modules[name]
def make_hook_recorder(self, pluginmanager):
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
@@ -503,43 +629,19 @@
l = list(cmdlineargs) + [p]
return self.inline_run(*l)
- def inline_runsource1(self, *args):
- """Run a test module in process using ``pytest.main()``.
-
- This behaves exactly like :py:meth:`inline_runsource` and
- takes identical arguments. However the return value is a list
- of the reports created by the pytest_runtest_logreport hook
- during the run.
-
- """
- args = list(args)
- source = args.pop()
- p = self.makepyfile(source)
- l = list(args) + [p]
- reprec = self.inline_run(*l)
- reports = reprec.getreports("pytest_runtest_logreport")
- assert len(reports) == 3, reports # setup/call/teardown
- return reports[1]
-
def inline_genitems(self, *args):
"""Run ``pytest.main(['--collectonly'])`` in-process.
Returns a tuple of the collected items and a
:py:class:`HookRecorder` instance.
- """
- return self.inprocess_run(list(args) + ['--collectonly'])
-
- def inprocess_run(self, args, plugins=()):
- """Run ``pytest.main()`` in-process, return Items and a HookRecorder.
-
This runs the :py:func:`pytest.main` function to run all of
py.test inside the test process itself like
:py:meth:`inline_run`. However the return value is a tuple of
the collection items and a :py:class:`HookRecorder` instance.
"""
- rec = self.inline_run(*args, plugins=plugins)
+ rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
@@ -568,12 +670,50 @@
plugins = kwargs.get("plugins") or []
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
- assert len(rec) == 1
- reprec = rec[0]
+ self.delete_loaded_modules()
+ if len(rec) == 1:
+ reprec = rec.pop()
+ else:
+ class reprec:
+ pass
reprec.ret = ret
- self.delete_loaded_modules()
return reprec
+ def runpytest_inprocess(self, *args, **kwargs):
+ """ Return result of running pytest in-process, providing a similar
+ interface to what self.runpytest() provides. """
+ if kwargs.get("syspathinsert"):
+ self.syspathinsert()
+ now = time.time()
+ capture = py.io.StdCapture()
+ try:
+ try:
+ reprec = self.inline_run(*args)
+ except SystemExit as e:
+ class reprec:
+ ret = e.args[0]
+ except Exception:
+ traceback.print_exc()
+ class reprec:
+ ret = 3
+ finally:
+ out, err = capture.reset()
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+
+ res = RunResult(reprec.ret,
+ out.split("\n"), err.split("\n"),
+ time.time()-now)
+ res.reprec = reprec
+ return res
+
+ def runpytest(self, *args, **kwargs):
+ """ Run pytest inline or in a subprocess, depending on the command line
+ option "--runpytest" and return a :py:class:`RunResult`.
+
+ """
+ return self._runpytest_method(*args, **kwargs)
+
def parseconfig(self, *args):
"""Return a new py.test Config instance from given commandline args.
@@ -745,57 +885,23 @@
except UnicodeEncodeError:
print("couldn't print to %s because of encoding" % (fp,))
- def runpybin(self, scriptname, *args):
- """Run a py.* tool with arguments.
+ def _getpytestargs(self):
+ # we cannot use "(sys.executable,script)"
+ # because on windows the script is e.g. a py.test.exe
+ return (sys.executable, _pytest_fullpath,) # noqa
- This can realy only be used to run py.test, you probably want
- :py:meth:`runpytest` instead.
+ def runpython(self, script):
+ """Run a python script using sys.executable as interpreter.
Returns a :py:class:`RunResult`.
-
"""
- fullargs = self._getpybinargs(scriptname) + args
- return self.run(*fullargs)
-
- def _getpybinargs(self, scriptname):
- if not self.request.config.getvalue("notoolsonpath"):
- # XXX we rely on script referring to the correct environment
- # we cannot use "(sys.executable,script)"
- # because on windows the script is e.g. a py.test.exe
- return (sys.executable, _pytest_fullpath,) # noqa
- else:
- pytest.skip("cannot run %r with --no-tools-on-path" % scriptname)
-
- def runpython(self, script, prepend=True):
- """Run a python script.
-
- If ``prepend`` is True then the directory from which the py
- package has been imported will be prepended to sys.path.
-
- Returns a :py:class:`RunResult`.
-
- """
- # XXX The prepend feature is probably not very useful since the
- # split of py and pytest.
- if prepend:
- s = self._getsysprepend()
- if s:
- script.write(s + "\n" + script.read())
return self.run(sys.executable, script)
- def _getsysprepend(self):
- if self.request.config.getvalue("notoolsonpath"):
- s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath())
- else:
- s = ""
- return s
-
def runpython_c(self, command):
"""Run python -c "command", return a :py:class:`RunResult`."""
- command = self._getsysprepend() + command
return self.run(sys.executable, "-c", command)
- def runpytest(self, *args):
+ def runpytest_subprocess(self, *args, **kwargs):
"""Run py.test as a subprocess with given arguments.
Any plugins added to the :py:attr:`plugins` list will added
@@ -820,7 +926,8 @@
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ('-p', plugins[0]) + args
- return self.runpybin("py.test", *args)
+ args = self._getpytestargs() + args
+ return self.run(*args)
def spawn_pytest(self, string, expect_timeout=10.0):
"""Run py.test using pexpect.
@@ -831,10 +938,8 @@
The pexpect child is returned.
"""
- if self.request.config.getvalue("notoolsonpath"):
- pytest.skip("--no-tools-on-path prevents running pexpect-spawn tests")
basetemp = self.tmpdir.mkdir("pexpect")
- invoke = " ".join(map(str, self._getpybinargs("py.test")))
+ invoke = " ".join(map(str, self._getpytestargs()))
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c doc/en/example/assertion/test_failures.py
--- a/doc/en/example/assertion/test_failures.py
+++ b/doc/en/example/assertion/test_failures.py
@@ -7,7 +7,7 @@
target = testdir.tmpdir.join(failure_demo.basename)
failure_demo.copy(target)
failure_demo.copy(testdir.tmpdir.join(failure_demo.basename))
- result = testdir.runpytest(target)
+ result = testdir.runpytest(target, syspathinsert=True)
result.stdout.fnmatch_lines([
"*42 failed*"
])
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c doc/en/writing_plugins.txt
--- a/doc/en/writing_plugins.txt
+++ b/doc/en/writing_plugins.txt
@@ -186,12 +186,44 @@
If you want to look at the names of existing plugins, use
the ``--traceconfig`` option.
+Testing plugins
+---------------
+
+pytest comes with some facilities that you can enable for testing your
+plugin. Given that you have an installed plugin you can enable the
+:py:class:`testdir <_pytest.pytester.Testdir>` fixture via specifying a
+command line option to include the pytester plugin (``-p pytester``) or
+by putting ``pytest_plugins = pytester`` into your test or
+``conftest.py`` file. You then will have a ``testdir`` fixture which you
+can use like this::
+
+ # content of test_myplugin.py
+
+ pytest_plugins = pytester # to get testdir fixture
+
+ def test_myplugin(testdir):
+ testdir.makepyfile("""
+ def test_example():
+ pass
+ """)
+ result = testdir.runpytest("--verbose")
+        result.stdout.fnmatch_lines("""
+ test_example*
+ """)
+
+Note that by default ``testdir.runpytest()`` will perform a pytest
+in-process. You can pass the command line option ``--runpytest=subprocess``
+to have it happen in a subprocess.
+
+Also see the :py:class:`RunResult <_pytest.pytester.RunResult>` for more
+methods of the result object that you get from a call to ``runpytest``.
.. _`writinghooks`:
Writing hook functions
======================
+
.. _validation:
hook function validation and execution
@@ -493,3 +525,13 @@
.. autoclass:: _pytest.core.CallOutcome()
:members:
+.. currentmodule:: _pytest.pytester
+
+.. autoclass:: Testdir()
+ :members: runpytest,runpytest_subprocess,runpytest_inprocess,makeconftest,makepyfile
+
+.. autoclass:: RunResult()
+ :members:
+
+.. autoclass:: LineMatcher()
+ :members:
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/acceptance_test.py
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -82,7 +82,7 @@
def test_option(pytestconfig):
assert pytestconfig.option.xyz == "123"
""")
- result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123")
+ result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines([
'*1 passed*',
@@ -203,7 +203,7 @@
os.chdir(os.path.dirname(os.getcwd()))
print (py.log)
"""))
- result = testdir.runpython(p, prepend=False)
+ result = testdir.runpython(p)
assert not result.ret
def test_issue109_sibling_conftests_not_loaded(self, testdir):
@@ -353,7 +353,8 @@
*unrecognized*
""")
- def test_getsourcelines_error_issue553(self, testdir):
+ def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
+ monkeypatch.setattr("inspect.getsourcelines", None)
p = testdir.makepyfile("""
def raise_error(obj):
raise IOError('source code not available')
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/conftest.py
--- a/testing/conftest.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import pytest
-import sys
-
-pytest_plugins = "pytester",
-
-import os, py
-
-class LsofFdLeakChecker(object):
- def get_open_files(self):
- out = self._exec_lsof()
- open_files = self._parse_lsof_output(out)
- return open_files
-
- def _exec_lsof(self):
- pid = os.getpid()
- return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
-
- def _parse_lsof_output(self, out):
- def isopen(line):
- return line.startswith('f') and (
- "deleted" not in line and 'mem' not in line and "txt" not in line and 'cwd' not in line)
-
- open_files = []
-
- for line in out.split("\n"):
- if isopen(line):
- fields = line.split('\0')
- fd = fields[0][1:]
- filename = fields[1][1:]
- if filename.startswith('/'):
- open_files.append((fd, filename))
-
- return open_files
-
-
-def pytest_addoption(parser):
- parser.addoption('--lsof',
- action="store_true", dest="lsof", default=False,
- help=("run FD checks if lsof is available"))
-
-def pytest_runtest_setup(item):
- config = item.config
- config._basedir = py.path.local()
- if config.getvalue("lsof"):
- try:
- config._fd_leak_checker = LsofFdLeakChecker()
- config._openfiles = config._fd_leak_checker.get_open_files()
- except py.process.cmdexec.Error:
- pass
-
-#def pytest_report_header():
-# return "pid: %s" % os.getpid()
-
-def check_open_files(config):
- lines2 = config._fd_leak_checker.get_open_files()
- new_fds = set([t[0] for t in lines2]) - set([t[0] for t in config._openfiles])
- open_files = [t for t in lines2 if t[0] in new_fds]
- if open_files:
- error = []
- error.append("***** %s FD leakage detected" % len(open_files))
- error.extend([str(f) for f in open_files])
- error.append("*** Before:")
- error.extend([str(f) for f in config._openfiles])
- error.append("*** After:")
- error.extend([str(f) for f in lines2])
- error.append(error[0])
- raise AssertionError("\n".join(error))
-
-@pytest.hookimpl_opts(hookwrapper=True, trylast=True)
-def pytest_runtest_teardown(item):
- yield
- item.config._basedir.chdir()
- if hasattr(item.config, '_openfiles'):
- check_open_files(item.config)
-
-# XXX copied from execnet's conftest.py - needs to be merged
-winpymap = {
- 'python2.7': r'C:\Python27\python.exe',
- 'python2.6': r'C:\Python26\python.exe',
- 'python3.1': r'C:\Python31\python.exe',
- 'python3.2': r'C:\Python32\python.exe',
- 'python3.3': r'C:\Python33\python.exe',
- 'python3.4': r'C:\Python34\python.exe',
- 'python3.5': r'C:\Python35\python.exe',
-}
-
-def getexecutable(name, cache={}):
- try:
- return cache[name]
- except KeyError:
- executable = py.path.local.sysfind(name)
- if executable:
- if name == "jython":
- import subprocess
- popen = subprocess.Popen([str(executable), "--version"],
- universal_newlines=True, stderr=subprocess.PIPE)
- out, err = popen.communicate()
- if not err or "2.5" not in err:
- executable = None
- if "2.5.2" in err:
- executable = None # http://bugs.jython.org/issue1790
- cache[name] = executable
- return executable
-
-@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
- 'pypy', 'pypy3'])
-def anypython(request):
- name = request.param
- executable = getexecutable(name)
- if executable is None:
- if sys.platform == "win32":
- executable = winpymap.get(name, None)
- if executable:
- executable = py.path.local(executable)
- if executable.check():
- return executable
- pytest.skip("no suitable %s found" % (name,))
- return executable
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/python/collect.py
--- a/testing/python/collect.py
+++ b/testing/python/collect.py
@@ -627,9 +627,7 @@
sub1.join("test_in_sub1.py").write("def test_1(): pass")
sub2.join("test_in_sub2.py").write("def test_2(): pass")
result = testdir.runpytest("-v", "-s")
- result.stdout.fnmatch_lines([
- "*2 passed*"
- ])
+ result.assert_outcomes(passed=2)
def test_modulecol_roundtrip(testdir):
modcol = testdir.getmodulecol("pass", withinit=True)
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/python/fixture.py
--- a/testing/python/fixture.py
+++ b/testing/python/fixture.py
@@ -100,9 +100,7 @@
sub1.join("test_in_sub1.py").write("def test_1(arg1): pass")
sub2.join("test_in_sub2.py").write("def test_2(arg2): pass")
result = testdir.runpytest("-v")
- result.stdout.fnmatch_lines([
- "*2 passed*"
- ])
+ result.assert_outcomes(passed=2)
def test_extend_fixture_module_class(self, testdir):
testfile = testdir.makepyfile("""
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/python/metafunc.py
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -292,9 +292,7 @@
""")
result = testdir.runpytest()
assert result.ret == 1
- result.stdout.fnmatch_lines([
- "*6 fail*",
- ])
+ result.assert_outcomes(failed=6)
def test_parametrize_CSV(self, testdir):
testdir.makepyfile("""
@@ -375,7 +373,7 @@
assert metafunc.cls == TestClass
""")
result = testdir.runpytest(p, "-v")
- result.assertoutcome(passed=2)
+ result.assert_outcomes(passed=2)
def test_addcall_with_two_funcargs_generators(self, testdir):
testdir.makeconftest("""
@@ -430,9 +428,7 @@
pass
""")
result = testdir.runpytest(p)
- result.stdout.fnmatch_lines([
- "*1 pass*",
- ])
+ result.assert_outcomes(passed=1)
def test_generate_plugin_and_module(self, testdir):
@@ -506,9 +502,7 @@
self.val = 1
""")
result = testdir.runpytest(p)
- result.stdout.fnmatch_lines([
- "*1 pass*",
- ])
+ result.assert_outcomes(passed=1)
def test_parametrize_functional2(self, testdir):
testdir.makepyfile("""
@@ -653,8 +647,8 @@
def test_function():
pass
""")
- reprec = testdir.inline_run()
- reprec.assertoutcome(passed=1)
+ reprec = testdir.runpytest()
+ reprec.assert_outcomes(passed=1)
def test_generate_tests_only_done_in_subdir(self, testdir):
sub1 = testdir.mkpydir("sub1")
@@ -670,9 +664,7 @@
sub1.join("test_in_sub1.py").write("def test_1(): pass")
sub2.join("test_in_sub2.py").write("def test_2(): pass")
result = testdir.runpytest("-v", "-s", sub1, sub2, sub1)
- result.stdout.fnmatch_lines([
- "*3 passed*"
- ])
+ result.assert_outcomes(passed=3)
def test_generate_same_function_names_issue403(self, testdir):
testdir.makepyfile("""
@@ -687,8 +679,8 @@
test_x = make_tests()
test_y = make_tests()
""")
- reprec = testdir.inline_run()
- reprec.assertoutcome(passed=4)
+ reprec = testdir.runpytest()
+ reprec.assert_outcomes(passed=4)
@pytest.mark.issue463
def test_parameterize_misspelling(self, testdir):
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_assertion.py
--- a/testing/test_assertion.py
+++ b/testing/test_assertion.py
@@ -461,7 +461,7 @@
("--assert=plain", "--nomagic"),
("--assert=plain", "--no-assert", "--nomagic"))
for opt in off_options:
- result = testdir.runpytest(*opt)
+ result = testdir.runpytest_subprocess(*opt)
assert "3 == 4" not in result.stdout.str()
def test_old_assert_mode(testdir):
@@ -469,7 +469,7 @@
def test_in_old_mode():
assert "@py_builtins" not in globals()
""")
- result = testdir.runpytest("--assert=reinterp")
+ result = testdir.runpytest_subprocess("--assert=reinterp")
assert result.ret == 0
def test_triple_quoted_string_issue113(testdir):
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_assertrewrite.py
--- a/testing/test_assertrewrite.py
+++ b/testing/test_assertrewrite.py
@@ -453,7 +453,7 @@
assert not os.path.exists(__cached__)
assert not os.path.exists(os.path.dirname(__cached__))""")
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
- assert testdir.runpytest().ret == 0
+ assert testdir.runpytest_subprocess().ret == 0
@pytest.mark.skipif('"__pypy__" in sys.modules')
def test_pyc_vs_pyo(self, testdir, monkeypatch):
@@ -468,12 +468,12 @@
tmp = "--basetemp=%s" % p
monkeypatch.setenv("PYTHONOPTIMIZE", "2")
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
- assert testdir.runpybin("py.test", tmp).ret == 0
+ assert testdir.runpytest_subprocess(tmp).ret == 0
tagged = "test_pyc_vs_pyo." + PYTEST_TAG
assert tagged + ".pyo" in os.listdir("__pycache__")
monkeypatch.undo()
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
- assert testdir.runpybin("py.test", tmp).ret == 1
+ assert testdir.runpytest_subprocess(tmp).ret == 1
assert tagged + ".pyc" in os.listdir("__pycache__")
def test_package(self, testdir):
@@ -615,10 +615,8 @@
testdir.makepyfile(**contents)
testdir.maketxtfile(**{'testpkg/resource': "Load me please."})
- result = testdir.runpytest()
- result.stdout.fnmatch_lines([
- '* 1 passed*',
- ])
+ result = testdir.runpytest_subprocess()
+ result.assert_outcomes(passed=1)
def test_read_pyc(self, tmpdir):
"""
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_capture.py
--- a/testing/test_capture.py
+++ b/testing/test_capture.py
@@ -282,7 +282,7 @@
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
""")
- result = testdir.runpytest(p)
+ result = testdir.runpytest_subprocess(p)
result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
@@ -301,7 +301,7 @@
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print (optargs)
- result = testdir.runpytest(p, *optargs)
+ result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors show first!
@@ -327,7 +327,7 @@
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print (optargs)
- result = testdir.runpytest(p, *optargs)
+ result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors come first
@@ -348,7 +348,7 @@
logging.warn("hello432")
assert 0
""")
- result = testdir.runpytest(
+ result = testdir.runpytest_subprocess(
p, "--traceconfig",
"-p", "no:capturelog")
assert result.ret != 0
@@ -364,7 +364,7 @@
logging.warn("hello435")
""")
# make sure that logging is still captured in tests
- result = testdir.runpytest("-s", "-p", "no:capturelog")
+ result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == 0
result.stderr.fnmatch_lines([
"WARNING*hello435*",
@@ -383,7 +383,7 @@
logging.warn("hello433")
assert 0
""")
- result = testdir.runpytest(p, "-p", "no:capturelog")
+ result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines([
"WARNING*hello433*",
@@ -461,7 +461,7 @@
os.write(1, str(42).encode('ascii'))
raise KeyboardInterrupt()
""")
- result = testdir.runpytest(p)
+ result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines([
"*KeyboardInterrupt*"
])
@@ -474,7 +474,7 @@
def test_log(capsys):
logging.error('x')
""")
- result = testdir.runpytest(p)
+ result = testdir.runpytest_subprocess(p)
assert 'closed' not in result.stderr.str()
@@ -500,7 +500,7 @@
def test_hello(capfd):
pass
""")
- result = testdir.runpytest("--capture=no")
+ result = testdir.runpytest_subprocess("--capture=no")
result.stdout.fnmatch_lines([
"*1 skipped*"
])
@@ -563,9 +563,7 @@
test_foo()
""")
result = testdir.runpytest('--assert=plain')
- result.stdout.fnmatch_lines([
- '*2 passed*',
- ])
+ result.assert_outcomes(passed=2)
class TestTextIO:
@@ -885,7 +883,7 @@
os.write(1, "hello\\n".encode("ascii"))
assert 0
""")
- result = testdir.runpytest()
+ result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_x*
*assert 0*
@@ -936,7 +934,7 @@
cap = StdCaptureFD(out=False, err=False, in_=True)
cap.stop_capturing()
""")
- result = testdir.runpytest("--capture=fd")
+ result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()['passed'] == 3
@@ -971,7 +969,7 @@
os.write(1, b"hello\\n")
assert 0
""")
- result = testdir.runpytest()
+ result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_capture_again*
*assert 0*
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_collection.py
--- a/testing/test_collection.py
+++ b/testing/test_collection.py
@@ -296,7 +296,6 @@
subdir.ensure("__init__.py")
target = subdir.join(p.basename)
p.move(target)
- testdir.chdir()
subdir.chdir()
config = testdir.parseconfig(p.basename)
rcol = Session(config=config)
@@ -313,7 +312,7 @@
def test_collect_topdir(self, testdir):
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
- # XXX migrate to inline_genitems? (see below)
+ # XXX migrate to collectonly? (see below)
config = testdir.parseconfig(id)
topdir = testdir.tmpdir
rcol = Session(config)
@@ -470,7 +469,6 @@
assert col.config is config
def test_pkgfile(self, testdir):
- testdir.chdir()
tmpdir = testdir.tmpdir
subdir = tmpdir.join("subdir")
x = subdir.ensure("x.py")
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_config.py
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -75,7 +75,7 @@
[pytest]
addopts = --qwe
""")
- result = testdir.runpytest("--confcutdir=.")
+ result = testdir.inline_run("--confcutdir=.")
assert result.ret == 0
class TestConfigCmdlineParsing:
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_core.py
--- a/testing/test_core.py
+++ b/testing/test_core.py
@@ -961,7 +961,7 @@
""")
p.copy(p.dirpath("skipping2.py"))
monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
- result = testdir.runpytest("-rw", "-p", "skipping1", "--traceconfig")
+ result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines([
"WI1*skipped plugin*skipping1*hello*",
@@ -990,7 +990,7 @@
assert plugin is not None
""")
monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",")
- result = testdir.runpytest(p)
+ result = testdir.runpytest(p, syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_doctest.py
--- a/testing/test_doctest.py
+++ b/testing/test_doctest.py
@@ -1,5 +1,5 @@
from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
-import py, pytest
+import py
class TestDoctests:
@@ -75,8 +75,6 @@
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
- @pytest.mark.xfail('hasattr(sys, "pypy_version_info")', reason=
- "pypy leaks one FD")
def test_simple_doctestfile(self, testdir):
p = testdir.maketxtfile(test_doc="""
>>> x = 1
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_genscript.py
--- a/testing/test_genscript.py
+++ b/testing/test_genscript.py
@@ -16,7 +16,6 @@
assert self.script.check()
def run(self, anypython, testdir, *args):
- testdir.chdir()
return testdir._run(anypython, self.script, *args)
def test_gen(testdir, anypython, standalone):
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_helpconfig.py
--- a/testing/test_helpconfig.py
+++ b/testing/test_helpconfig.py
@@ -53,14 +53,14 @@
])
def test_debug(testdir, monkeypatch):
- result = testdir.runpytest("--debug")
+ result = testdir.runpytest_subprocess("--debug")
assert result.ret == 0
p = testdir.tmpdir.join("pytestdebug.log")
assert "pytest_sessionstart" in p.read()
def test_PYTEST_DEBUG(testdir, monkeypatch):
monkeypatch.setenv("PYTEST_DEBUG", "1")
- result = testdir.runpytest()
+ result = testdir.runpytest_subprocess()
assert result.ret == 0
result.stderr.fnmatch_lines([
"*pytest_plugin_registered*",
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_nose.py
--- a/testing/test_nose.py
+++ b/testing/test_nose.py
@@ -19,9 +19,7 @@
test_hello.teardown = lambda: l.append(2)
""")
result = testdir.runpytest(p, '-p', 'nose')
- result.stdout.fnmatch_lines([
- "*2 passed*"
- ])
+ result.assert_outcomes(passed=2)
def test_setup_func_with_setup_decorator():
@@ -66,9 +64,7 @@
""")
result = testdir.runpytest(p, '-p', 'nose')
- result.stdout.fnmatch_lines([
- "*2 passed*"
- ])
+ result.assert_outcomes(passed=2)
def test_nose_setup_func_failure(testdir):
@@ -302,7 +298,7 @@
pass
""")
result = testdir.runpytest()
- result.stdout.fnmatch_lines("*1 passed*")
+ result.assert_outcomes(passed=1)
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_setup_teardown_linking_issue265(testdir):
@@ -327,8 +323,8 @@
"""Undoes the setup."""
raise Exception("should not call teardown for skipped tests")
''')
- reprec = testdir.inline_run()
- reprec.assertoutcome(passed=1, skipped=1)
+ reprec = testdir.runpytest()
+ reprec.assert_outcomes(passed=1, skipped=1)
def test_SkipTest_during_collection(testdir):
@@ -339,7 +335,7 @@
assert False
""")
result = testdir.runpytest(p)
- result.assertoutcome(skipped=1)
+ result.assert_outcomes(skipped=1)
def test_SkipTest_in_test(testdir):
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_pdb.py
--- a/testing/test_pdb.py
+++ b/testing/test_pdb.py
@@ -2,6 +2,13 @@
import py
import sys
+def runpdb_and_get_report(testdir, source):
+ p = testdir.makepyfile(source)
+ result = testdir.runpytest_inprocess("--pdb", p)
+ reports = result.reprec.getreports("pytest_runtest_logreport")
+ assert len(reports) == 3, reports # setup/call/teardown
+ return reports[1]
+
class TestPDB:
def pytest_funcarg__pdblist(self, request):
@@ -14,7 +21,7 @@
return pdblist
def test_pdb_on_fail(self, testdir, pdblist):
- rep = testdir.inline_runsource1('--pdb', """
+ rep = runpdb_and_get_report(testdir, """
def test_func():
assert 0
""")
@@ -24,7 +31,7 @@
assert tb[-1].name == "test_func"
def test_pdb_on_xfail(self, testdir, pdblist):
- rep = testdir.inline_runsource1('--pdb', """
+ rep = runpdb_and_get_report(testdir, """
import pytest
@pytest.mark.xfail
def test_func():
@@ -34,7 +41,7 @@
assert not pdblist
def test_pdb_on_skip(self, testdir, pdblist):
- rep = testdir.inline_runsource1('--pdb', """
+ rep = runpdb_and_get_report(testdir, """
import pytest
def test_func():
pytest.skip("hello")
@@ -43,7 +50,7 @@
assert len(pdblist) == 0
def test_pdb_on_BdbQuit(self, testdir, pdblist):
- rep = testdir.inline_runsource1('--pdb', """
+ rep = runpdb_and_get_report(testdir, """
import bdb
def test_func():
raise bdb.BdbQuit
@@ -260,7 +267,7 @@
def test_pdb_collection_failure_is_shown(self, testdir):
p1 = testdir.makepyfile("""xxx """)
- result = testdir.runpytest("--pdb", p1)
+ result = testdir.runpytest_subprocess("--pdb", p1)
result.stdout.fnmatch_lines([
"*NameError*xxx*",
"*1 error*",
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_pytester.py
--- a/testing/test_pytester.py
+++ b/testing/test_pytester.py
@@ -69,9 +69,7 @@
assert 1
""")
result = testdir.runpytest()
- result.stdout.fnmatch_lines([
- "*1 passed*"
- ])
+ result.assert_outcomes(passed=1)
def make_holder():
@@ -114,16 +112,6 @@
unichr = chr
testdir.makepyfile(unichr(0xfffd))
-def test_inprocess_plugins(testdir):
- class Plugin(object):
- configured = False
- def pytest_configure(self, config):
- self.configured = True
- plugin = Plugin()
- testdir.inprocess_run([], [plugin])
-
- assert plugin.configured
-
def test_inline_run_clean_modules(testdir):
test_mod = testdir.makepyfile("def test_foo(): assert True")
result = testdir.inline_run(str(test_mod))
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_session.py
--- a/testing/test_session.py
+++ b/testing/test_session.py
@@ -203,7 +203,6 @@
def test_plugin_specify(testdir):
- testdir.chdir()
pytest.raises(ImportError, """
testdir.parseconfig("-p", "nqweotexistent")
""")
diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c tox.ini
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
[tox]
distshare={homedir}/.tox/distshare
-envlist=flakes,py26,py27,py34,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py33,py27-xdist,py33-xdist,py27-trial,py33-trial,doctesting,py27-cxfreeze
+envlist=flakes,py26,py27,py34,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py33,py27-xdist,py33-xdist,{py27,py33}-trial,py27-subprocess,doctesting,py27-cxfreeze
[testenv]
changedir=testing
@@ -9,6 +9,15 @@
nose
mock
+[testenv:py27-subprocess]
+changedir=.
+basepython=python2.7
+deps=pytest-xdist
+ mock
+ nose
+commands=
+ py.test -n3 -rfsxX --runpytest=subprocess {posargs:testing}
+
[testenv:genscript]
changedir=.
commands= py.test --genscript=pytest1
@@ -136,7 +145,7 @@
minversion=2.0
plugins=pytester
#--pyargs --doctest-modules --ignore=.tox
-addopts= -rxsX
+addopts= -rxsX -p pytester
rsyncdirs=tox.ini pytest.py _pytest testing
python_files=test_*.py *_test.py testing/*/*.py
python_classes=Test Acceptance
Repository URL: https://bitbucket.org/pytest-dev/pytest/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the pytest-commit
mailing list