[Pytest-commit] commit/pytest: 6 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Wed Apr 29 16:32:36 CEST 2015


6 new commits in pytest:

https://bitbucket.org/pytest-dev/pytest/commits/d7f0b42aa824/
Changeset:   d7f0b42aa824
Branch:      testrefactor
User:        hpk42
Date:        2015-04-28 09:54:45+00:00
Summary:     - make API between runpytest() and inline_run() more similar
- shift a number of tests to become inline_run() tests
Affected #:  9 files

diff -r 156f6edf7067d395e77fa81742e56833066d3f37 -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b _pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -29,17 +29,21 @@
                   initialization.
     """
     try:
-        config = _prepareconfig(args, plugins)
-    except ConftestImportFailure:
-        e = sys.exc_info()[1]
-        tw = py.io.TerminalWriter(sys.stderr)
-        for line in traceback.format_exception(*e.excinfo):
-            tw.line(line.rstrip(), red=True)
-        tw.line("ERROR: could not load %s\n" % (e.path), red=True)
+        try:
+            config = _prepareconfig(args, plugins)
+        except ConftestImportFailure as e:
+            tw = py.io.TerminalWriter(sys.stderr)
+            for line in traceback.format_exception(*e.excinfo):
+                tw.line(line.rstrip(), red=True)
+            tw.line("ERROR: could not load %s\n" % (e.path), red=True)
+            return 4
+        else:
+            config.pluginmanager.check_pending()
+            return config.hook.pytest_cmdline_main(config=config)
+    except UsageError as e:
+        for msg in e.args:
+            sys.stderr.write("ERROR: %s\n" %(msg,))
         return 4
-    else:
-        config.pluginmanager.check_pending()
-        return config.hook.pytest_cmdline_main(config=config)
 
 class cmdline:  # compatibility namespace
     main = staticmethod(main)

diff -r 156f6edf7067d395e77fa81742e56833066d3f37 -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b _pytest/main.py
--- a/_pytest/main.py
+++ b/_pytest/main.py
@@ -83,10 +83,7 @@
             initstate = 2
             doit(config, session)
         except pytest.UsageError:
-            args = sys.exc_info()[1].args
-            for msg in args:
-                sys.stderr.write("ERROR: %s\n" %(msg,))
-            session.exitstatus = EXIT_USAGEERROR
+            raise
         except KeyboardInterrupt:
             excinfo = py.code.ExceptionInfo()
             config.hook.pytest_keyboard_interrupt(excinfo=excinfo)

diff -r 156f6edf7067d395e77fa81742e56833066d3f37 -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b _pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -204,6 +204,8 @@
     tmptestdir = TmpTestdir(request)
     return tmptestdir
 
+
+
 rex_outcome = re.compile("(\d+) (\w+)")
 class RunResult:
     """The result of running a command.
@@ -229,6 +231,8 @@
         self.duration = duration
 
     def parseoutcomes(self):
+        """ Return a dictionary of outcomestring->num from parsing
+        the terminal output that the test process produced."""
         for line in reversed(self.outlines):
             if 'seconds' in line:
                 outcomes = rex_outcome.findall(line)
@@ -238,13 +242,16 @@
                         d[cat] = int(num)
                     return d
 
-    def assertoutcome(self, passed=0, skipped=0, failed=0):
+    def assert_outcomes(self, passed=0, skipped=0, failed=0):
+        """ assert that the specified outcomes appear with the respective
+        numbers (0 means it didn't occur) in the text output from a test run."""
         d = self.parseoutcomes()
         assert passed == d.get("passed", 0)
         assert skipped == d.get("skipped", 0)
         assert failed == d.get("failed", 0)
 
 
+
 class TmpTestdir:
     """Temporary test directory with tools to test/run py.test itself.
 
@@ -568,12 +575,32 @@
         plugins = kwargs.get("plugins") or []
         plugins.append(Collect())
         ret = pytest.main(list(args), plugins=plugins)
-        assert len(rec) == 1
-        reprec = rec[0]
+        self.delete_loaded_modules()
+        if len(rec) == 1:
+            reprec = rec.pop()
+        else:
+            class reprec:
+                pass
         reprec.ret = ret
-        self.delete_loaded_modules()
         return reprec
 
+    def inline_runpytest(self, *args):
+        """ Return result of running pytest in-process, providing a similar
+        interface to what self.runpytest() provides. """
+        now = time.time()
+        capture = py.io.StdCaptureFD()
+        try:
+            reprec = self.inline_run(*args)
+        finally:
+            out, err = capture.reset()
+        assert out or err
+
+        res = RunResult(reprec.ret,
+                        out.split("\n"), err.split("\n"),
+                        time.time()-now)
+        res.reprec = reprec
+        return res
+
     def parseconfig(self, *args):
         """Return a new py.test Config instance from given commandline args.
 

diff -r 156f6edf7067d395e77fa81742e56833066d3f37 -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b testing/python/collect.py
--- a/testing/python/collect.py
+++ b/testing/python/collect.py
@@ -15,7 +15,7 @@
         p.pyimport()
         del py.std.sys.modules['test_whatever']
         b.ensure("test_whatever.py")
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines([
             "*import*mismatch*",
             "*imported*test_whatever*",
@@ -59,7 +59,7 @@
                 def __init__(self):
                     pass
         """)
-        result = testdir.runpytest("-rw")
+        result = testdir.inline_runpytest("-rw")
         result.stdout.fnmatch_lines_random("""
             WC1*test_class_with_init_warning.py*__init__*
         """)
@@ -69,7 +69,7 @@
             class test(object):
                 pass
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines([
             "*collected 0*",
         ])
@@ -86,7 +86,7 @@
                 def teardown_class(cls):
                     pass
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines([
             "*1 passed*",
         ])
@@ -534,7 +534,7 @@
         """)
         testdir.makepyfile("def test_some(): pass")
         testdir.makepyfile(test_xyz="def test_func(): pass")
-        result = testdir.runpytest("--collect-only")
+        result = testdir.inline_runpytest("--collect-only")
         result.stdout.fnmatch_lines([
             "*<Module*test_pytest*",
             "*<MyModule*xyz*",
@@ -590,7 +590,7 @@
                     return MyFunction(name, collector)
         """)
         testdir.makepyfile("def some(): pass")
-        result = testdir.runpytest("--collect-only")
+        result = testdir.inline_runpytest("--collect-only")
         result.stdout.fnmatch_lines([
             "*MyFunction*some*",
         ])
@@ -626,7 +626,7 @@
     """))
     sub1.join("test_in_sub1.py").write("def test_1(): pass")
     sub2.join("test_in_sub2.py").write("def test_2(): pass")
-    result = testdir.runpytest("-v", "-s")
+    result = testdir.inline_runpytest("-v", "-s")
     result.stdout.fnmatch_lines([
         "*2 passed*"
     ])
@@ -650,7 +650,7 @@
                 raise ValueError("xyz")
         """)
         p = testdir.makepyfile("def test(hello): pass")
-        result = testdir.runpytest(p)
+        result = testdir.inline_runpytest(p)
         assert result.ret != 0
         out = result.stdout.str()
         assert out.find("xyz") != -1
@@ -658,7 +658,7 @@
         numentries = out.count("_ _ _") # separator for traceback entries
         assert numentries == 0
 
-        result = testdir.runpytest("--fulltrace", p)
+        result = testdir.inline_runpytest("--fulltrace", p)
         out = result.stdout.str()
         assert out.find("conftest.py:2: ValueError") != -1
         numentries = out.count("_ _ _ _") # separator for traceback entries
@@ -671,7 +671,7 @@
             x = 17
             asd
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.ret != 0
         out = result.stdout.str()
         assert "x = 1" not in out
@@ -680,7 +680,7 @@
             " *asd*",
             "E*NameError*",
         ])
-        result = testdir.runpytest("--fulltrace")
+        result = testdir.inline_runpytest("--fulltrace")
         out = result.stdout.str()
         assert "x = 1" in out
         assert "x = 2" in out
@@ -771,7 +771,7 @@
     """)
     p2 = p.new(basename=p.basename.replace("test", "check"))
     p.move(p2)
-    result = testdir.runpytest("--collect-only", "-s")
+    result = testdir.inline_runpytest("--collect-only", "-s")
     result.stdout.fnmatch_lines([
         "*check_customized*",
         "*check_simple*",
@@ -779,7 +779,7 @@
         "*check_meth*",
     ])
 
-    result = testdir.runpytest()
+    result = testdir.inline_runpytest()
     assert result.ret == 0
     result.stdout.fnmatch_lines([
         "*2 passed*",
@@ -795,12 +795,12 @@
         def _test_underscore():
             pass
     """)
-    result = testdir.runpytest("--collect-only", "-s")
+    result = testdir.inline_runpytest("--collect-only", "-s")
     result.stdout.fnmatch_lines([
         "*_test_underscore*",
     ])
 
-    result = testdir.runpytest()
+    result = testdir.inline_runpytest()
     assert result.ret == 0
     result.stdout.fnmatch_lines([
         "*1 passed*",
@@ -820,7 +820,7 @@
          def test_hello():
             pass
     """)
-    result = testdir.runpytest()
+    result = testdir.inline_runpytest()
     result.stdout.fnmatch_lines([
         "*1 passed*",
     ])
@@ -844,7 +844,7 @@
             def test_hello(self):
                 pass
     """)
-    result = testdir.runpytest("--collect-only")
+    result = testdir.inline_runpytest("--collect-only")
     result.stdout.fnmatch_lines([
         "*MyClass*",
         "*MyInstance*",
@@ -864,6 +864,6 @@
             return Test
         TestFoo = make_test()
     """)
-    result = testdir.runpytest()
+    result = testdir.inline_runpytest()
     assert "TypeError" not in result.stdout.str()
     assert result.ret == 0

diff -r 156f6edf7067d395e77fa81742e56833066d3f37 -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b testing/python/fixture.py
--- a/testing/python/fixture.py
+++ b/testing/python/fixture.py
@@ -33,7 +33,7 @@
             def test_func(some):
                 pass
         """)
-        result = testdir.runpytest() # "--collect-only")
+        result = testdir.inline_runpytest() # "--collect-only")
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*def test_func(some)*",
@@ -78,7 +78,7 @@
                 def test_method(self, something):
                     assert something is self
         """)
-        result = testdir.runpytest(p)
+        result = testdir.inline_runpytest(p)
         result.stdout.fnmatch_lines([
             "*1 passed*"
         ])
@@ -99,7 +99,7 @@
 
         sub1.join("test_in_sub1.py").write("def test_1(arg1): pass")
         sub2.join("test_in_sub2.py").write("def test_2(arg2): pass")
-        result = testdir.runpytest("-v")
+        result = testdir.inline_runpytest("-v")
         result.stdout.fnmatch_lines([
             "*2 passed*"
         ])
@@ -121,9 +121,9 @@
                  def test_spam(self, spam):
                      assert spam == 'spamspam'
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.runpytest(testfile)
+        result = testdir.inline_runpytest(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_extend_fixture_conftest_module(self, testdir):
@@ -144,9 +144,9 @@
             def test_spam(spam):
                 assert spam == 'spamspam'
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.runpytest(testfile)
+        result = testdir.inline_runpytest(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_extend_fixture_conftest_conftest(self, testdir):
@@ -170,9 +170,9 @@
             def test_spam(spam):
                 assert spam == "spamspam"
         """))
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.runpytest(testfile)
+        result = testdir.inline_runpytest(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_extend_fixture_conftest_plugin(self, testdir):
@@ -197,7 +197,7 @@
             def test_foo(foo):
                 assert foo == 14
         """)
-        result = testdir.runpytest('-s')
+        result = testdir.inline_runpytest('-s')
         assert result.ret == 0
 
     def test_extend_fixture_plugin_plugin(self, testdir):
@@ -223,7 +223,7 @@
             def test_foo(foo):
                 assert foo == 14
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.ret == 0
 
     def test_override_parametrized_fixture_conftest_module(self, testdir):
@@ -245,9 +245,9 @@
             def test_spam(spam):
                 assert spam == 'spam'
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.runpytest(testfile)
+        result = testdir.inline_runpytest(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_override_parametrized_fixture_conftest_conftest(self, testdir):
@@ -272,9 +272,9 @@
             def test_spam(spam):
                 assert spam == "spam"
         """))
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.runpytest(testfile)
+        result = testdir.inline_runpytest(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_override_non_parametrized_fixture_conftest_module(self, testdir):
@@ -299,9 +299,9 @@
                 assert spam == params['spam']
                 params['spam'] += 1
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines(["*3 passed*"])
-        result = testdir.runpytest(testfile)
+        result = testdir.inline_runpytest(testfile)
         result.stdout.fnmatch_lines(["*3 passed*"])
 
     def test_override_non_parametrized_fixture_conftest_conftest(self, testdir):
@@ -329,9 +329,9 @@
                 assert spam == params['spam']
                 params['spam'] += 1
         """))
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines(["*3 passed*"])
-        result = testdir.runpytest(testfile)
+        result = testdir.inline_runpytest(testfile)
         result.stdout.fnmatch_lines(["*3 passed*"])
 
     def test_autouse_fixture_plugin(self, testdir):
@@ -351,7 +351,7 @@
             def test_foo(request):
                 assert request.function.foo == 7
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.ret == 0
 
     def test_funcarg_lookup_error(self, testdir):
@@ -359,7 +359,7 @@
             def test_lookup_error(unknown):
                 pass
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines([
             "*ERROR*test_lookup_error*",
             "*def test_lookup_error(unknown):*",
@@ -388,7 +388,7 @@
                     traceback.print_exc()
                 assert sys.exc_info() == (None, None, None)
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.ret == 0
 
 
@@ -531,7 +531,7 @@
             def test_second():
                 assert len(l) == 1
         """)
-        result = testdir.runpytest(p)
+        result = testdir.inline_runpytest(p)
         result.stdout.fnmatch_lines([
             "*1 error*"  # XXX the whole module collection fails
             ])
@@ -616,7 +616,7 @@
         """))
         p = b.join("test_module.py")
         p.write("def test_func(arg1): pass")
-        result = testdir.runpytest(p, "--fixtures")
+        result = testdir.inline_runpytest(p, "--fixtures")
         assert result.ret == 0
         result.stdout.fnmatch_lines("""
             *fixtures defined*conftest*
@@ -785,7 +785,7 @@
             def test_two_different_setups(arg1, arg2):
                 assert arg1 != arg2
         """)
-        result = testdir.runpytest("-v")
+        result = testdir.inline_runpytest("-v")
         result.stdout.fnmatch_lines([
             "*1 passed*"
         ])
@@ -800,7 +800,7 @@
             def test_two_funcarg(arg1):
                 assert arg1 == 11
         """)
-        result = testdir.runpytest("-v")
+        result = testdir.inline_runpytest("-v")
         result.stdout.fnmatch_lines([
             "*1 passed*"
         ])
@@ -827,7 +827,7 @@
             def test_check_test0_has_teardown_correct():
                 assert test_0.l == [2]
         """)
-        result = testdir.runpytest("-v")
+        result = testdir.inline_runpytest("-v")
         result.stdout.fnmatch_lines([
             "*3 passed*"
         ])
@@ -843,7 +843,7 @@
             def test_func(app):
                 pass
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*3/x*",
@@ -898,7 +898,7 @@
             def test_add(arg2):
                 assert arg2 == 2
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*involved factories*",
             "* def arg2*",
@@ -920,7 +920,7 @@
             def test_add(arg1, arg2):
                 assert arg2 == 2
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*involved factories*",
             "* def arg2*",
@@ -944,7 +944,7 @@
                 assert arg2 == arg1 + 1
                 assert len(l) == arg1
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines([
             "*2 passed*"
         ])
@@ -964,7 +964,7 @@
             def test_missing(call_fail):
                 pass
             """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines("""
             *pytest.fixture()*
             *def call_fail(fail)*
@@ -1046,7 +1046,7 @@
         reprec.assertoutcome(passed=2)
 
     def test_usefixtures_seen_in_showmarkers(self, testdir):
-        result = testdir.runpytest("--markers")
+        result = testdir.inline_runpytest("--markers")
         result.stdout.fnmatch_lines("""
             *usefixtures(fixturename1*mark tests*fixtures*
         """)
@@ -1313,7 +1313,7 @@
         conftest.move(a.join(conftest.basename))
         a.join("test_something.py").write("def test_func(): pass")
         b.join("test_otherthing.py").write("def test_func(): pass")
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines("""
             *1 passed*1 error*
         """)
@@ -1767,7 +1767,7 @@
                 def test_1(arg):
                     pass
             """ % method)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*You tried*function*session*request*",
@@ -1825,7 +1825,7 @@
             def test_mismatch(arg):
                 pass
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*",
             "*1 error*",
@@ -1876,7 +1876,7 @@
             def test_func4(marg):
                 pass
         """)
-        result = testdir.runpytest("-v")
+        result = testdir.inline_runpytest("-v")
         result.stdout.fnmatch_lines("""
             test_mod1.py::test_func[s1] PASSED
             test_mod2.py::test_func2[s1] PASSED
@@ -1928,7 +1928,7 @@
                 def test_3(self):
                     pass
         """)
-        result = testdir.runpytest("-vs")
+        result = testdir.inline_runpytest("-vs")
         result.stdout.fnmatch_lines("""
             test_class_ordering.py::TestClass2::test_1[1-a] PASSED
             test_class_ordering.py::TestClass2::test_1[2-a] PASSED
@@ -2019,7 +2019,7 @@
             def test_finish():
                 assert not l
         """)
-        result = testdir.runpytest("-v")
+        result = testdir.inline_runpytest("-v")
         result.stdout.fnmatch_lines("""
             *3 passed*
         """)
@@ -2049,7 +2049,7 @@
             def test_browser(browser):
                 assert browser['visited'] is True
         """))
-        reprec = testdir.runpytest("-s")
+        reprec = testdir.inline_runpytest("-s")
         for test in ['test_browser']:
             reprec.stdout.fnmatch_lines('*Finalized*')
 
@@ -2260,7 +2260,7 @@
             def test_foo(fix):
                 assert 1
         """)
-        res = testdir.runpytest('-v')
+        res = testdir.inline_runpytest('-v')
         res.stdout.fnmatch_lines([
             '*test_foo*alpha*',
             '*test_foo*beta*'])
@@ -2277,7 +2277,7 @@
             def test_foo(fix):
                 assert 1
         """)
-        res = testdir.runpytest('-v')
+        res = testdir.inline_runpytest('-v')
         res.stdout.fnmatch_lines([
             '*test_foo*alpha*',
             '*test_foo*beta*'])
@@ -2337,7 +2337,7 @@
             def test_something(gen):
                 pass
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*def gen(qwe123):*",
@@ -2363,7 +2363,7 @@
             def test_3():
                 assert l[0] != l[1]
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines("""
             *ERROR*teardown*test_1*
             *KeyError*
@@ -2383,7 +2383,7 @@
             def test_something():
                 pass
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*def gen(qwe123):*",
@@ -2397,7 +2397,7 @@
         assert config.option.showfixtures
 
     def test_show_fixtures(self, testdir):
-        result = testdir.runpytest("--fixtures")
+        result = testdir.inline_runpytest("--fixtures")
         result.stdout.fnmatch_lines([
                 "*tmpdir*",
                 "*temporary directory*",
@@ -2405,7 +2405,7 @@
         )
 
     def test_show_fixtures_verbose(self, testdir):
-        result = testdir.runpytest("--fixtures", "-v")
+        result = testdir.inline_runpytest("--fixtures", "-v")
         result.stdout.fnmatch_lines([
                 "*tmpdir*--*tmpdir.py*",
                 "*temporary directory*",
@@ -2422,7 +2422,7 @@
             def arg1():
                 """  hello world """
         ''')
-        result = testdir.runpytest("--fixtures", p)
+        result = testdir.inline_runpytest("--fixtures", p)
         result.stdout.fnmatch_lines("""
             *tmpdir
             *fixtures defined from*
@@ -2444,7 +2444,7 @@
                 def test_hello():
                     pass
             """)
-        result = testdir.runpytest("--fixtures")
+        result = testdir.inline_runpytest("--fixtures")
         result.stdout.fnmatch_lines("""
             *tmpdir*
             *fixtures defined from*conftest*
@@ -2470,7 +2470,7 @@
 
                 """
         ''')
-        result = testdir.runpytest("--fixtures", p)
+        result = testdir.inline_runpytest("--fixtures", p)
         result.stdout.fnmatch_lines("""
             * fixtures defined from test_show_fixtures_trimmed_doc *
             arg2
@@ -2498,7 +2498,7 @@
                 print ("test2 %s" % arg1)
                 assert 0
         """)
-        result = testdir.runpytest("-s")
+        result = testdir.inline_runpytest("-s")
         result.stdout.fnmatch_lines("""
             *setup*
             *test1 1*
@@ -2521,7 +2521,7 @@
             def test_2(arg1):
                 print ("test2 %s" % arg1)
         """)
-        result = testdir.runpytest("-s")
+        result = testdir.inline_runpytest("-s")
         result.stdout.fnmatch_lines("""
             *setup*
             *test1 1*
@@ -2539,7 +2539,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.runpytest("-s")
+        result = testdir.inline_runpytest("-s")
         result.stdout.fnmatch_lines("""
             *pytest.fail*setup*
             *1 error*
@@ -2555,7 +2555,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.runpytest("-s")
+        result = testdir.inline_runpytest("-s")
         result.stdout.fnmatch_lines("""
             *pytest.fail*teardown*
             *1 passed*1 error*
@@ -2571,7 +2571,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.runpytest("-s")
+        result = testdir.inline_runpytest("-s")
         result.stdout.fnmatch_lines("""
             *fixture function*
             *test_yields*:2*
@@ -2587,7 +2587,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.runpytest("-s")
+        result = testdir.inline_runpytest("-s")
         result.stdout.fnmatch_lines("""
             *yield_fixture*requires*yield*
             *yield_fixture*
@@ -2603,7 +2603,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.runpytest("-s")
+        result = testdir.inline_runpytest("-s")
         result.stdout.fnmatch_lines("""
             *fixture*cannot use*yield*
             *def arg1*

diff -r 156f6edf7067d395e77fa81742e56833066d3f37 -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b testing/python/metafunc.py
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -246,7 +246,7 @@
                 assert x in (10,20)
                 assert y == 2
         """)
-        result = testdir.runpytest("-v")
+        result = testdir.inline_runpytest("-v")
         result.stdout.fnmatch_lines([
             "*test_simple*1-2*",
             "*test_simple*2-2*",
@@ -290,11 +290,9 @@
                 def test_meth(self, x, y):
                     assert 0, x
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.ret == 1
-        result.stdout.fnmatch_lines([
-            "*6 fail*",
-        ])
+        result.assert_outcomes(failed=6)
 
     def test_parametrize_CSV(self, testdir):
         testdir.makepyfile("""
@@ -332,7 +330,7 @@
                def test_3(self, arg, arg2):
                   pass
         """)
-        result = testdir.runpytest("-v")
+        result = testdir.inline_runpytest("-v")
         assert result.ret == 0
         result.stdout.fnmatch_lines("""
             *test_1*1*
@@ -374,8 +372,8 @@
                     assert metafunc.function == unbound
                     assert metafunc.cls == TestClass
         """)
-        result = testdir.runpytest(p, "-v")
-        result.assertoutcome(passed=2)
+        result = testdir.inline_runpytest(p, "-v")
+        result.assert_outcomes(passed=2)
 
     def test_addcall_with_two_funcargs_generators(self, testdir):
         testdir.makeconftest("""
@@ -391,7 +389,7 @@
                 def test_myfunc(self, arg1, arg2):
                     assert arg1 == arg2
         """)
-        result = testdir.runpytest("-v", p)
+        result = testdir.inline_runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_myfunc*0*PASS*",
             "*test_myfunc*1*FAIL*",
@@ -412,7 +410,7 @@
             def test_func2(arg1):
                 assert arg1 in (10, 20)
         """)
-        result = testdir.runpytest("-v", p)
+        result = testdir.inline_runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func1*0*PASS*",
             "*test_func1*1*FAIL*",
@@ -429,10 +427,8 @@
                 def test_hello(xyz):
                     pass
         """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 pass*",
-        ])
+        result = testdir.inline_runpytest(p)
+        result.assert_outcomes(passed=1)
 
 
     def test_generate_plugin_and_module(self, testdir):
@@ -454,7 +450,7 @@
                 def test_myfunc(self, arg1, arg2):
                     assert arg1 == arg2
         """)
-        result = testdir.runpytest("-v", p)
+        result = testdir.inline_runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_myfunc*hello*PASS*",
             "*test_myfunc*world*FAIL*",
@@ -470,7 +466,7 @@
                 def test_myfunc(self, hello):
                     assert hello == "world"
         """)
-        result = testdir.runpytest("-v", p)
+        result = testdir.inline_runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_myfunc*hello*PASS*",
             "*1 passed*"
@@ -487,7 +483,7 @@
                     assert not hasattr(self, 'x')
                     self.x = 1
         """)
-        result = testdir.runpytest("-v", p)
+        result = testdir.inline_runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func*0*PASS*",
             "*test_func*1*PASS*",
@@ -505,10 +501,8 @@
                 def setup_method(self, func):
                     self.val = 1
             """)
-        result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 pass*",
-        ])
+        result = testdir.inline_runpytest(p)
+        result.assert_outcomes(passed=1)
 
     def test_parametrize_functional2(self, testdir):
         testdir.makepyfile("""
@@ -518,7 +512,7 @@
             def test_hello(arg1, arg2):
                 assert 0, (arg1, arg2)
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         result.stdout.fnmatch_lines([
             "*(1, 4)*",
             "*(1, 5)*",
@@ -543,7 +537,7 @@
             def test_func1(arg1, arg2):
                 assert arg1 == 11
         """)
-        result = testdir.runpytest("-v", p)
+        result = testdir.inline_runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func1*1*PASS*",
             "*1 passed*"
@@ -564,7 +558,7 @@
             def test_func(arg2):
                 assert arg2 == 10
         """)
-        result = testdir.runpytest("-v", p)
+        result = testdir.inline_runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func*1*PASS*",
             "*1 passed*"
@@ -580,7 +574,7 @@
             def test_function(a, b):
                 assert a == b
         """)
-        result = testdir.runpytest("-v")
+        result = testdir.inline_runpytest("-v")
         assert result.ret == 1
         result.stdout.fnmatch_lines_random([
             "*test_function*basic*PASSED",
@@ -597,7 +591,7 @@
             def test_function(a, b):
                 assert 1
         """)
-        result = testdir.runpytest("-v")
+        result = testdir.inline_runpytest("-v")
         result.stdout.fnmatch_lines("""
             *test_function*1-b0*
             *test_function*1.3-b1*
@@ -653,8 +647,8 @@
             def test_function():
                 pass
         """)
-        reprec = testdir.inline_run()
-        reprec.assertoutcome(passed=1)
+        reprec = testdir.inline_runpytest()
+        reprec.assert_outcomes(passed=1)
 
     def test_generate_tests_only_done_in_subdir(self, testdir):
         sub1 = testdir.mkpydir("sub1")
@@ -669,10 +663,8 @@
         """))
         sub1.join("test_in_sub1.py").write("def test_1(): pass")
         sub2.join("test_in_sub2.py").write("def test_2(): pass")
-        result = testdir.runpytest("-v", "-s", sub1, sub2, sub1)
-        result.stdout.fnmatch_lines([
-            "*3 passed*"
-        ])
+        result = testdir.inline_runpytest("-v", "-s", sub1, sub2, sub1)
+        result.assert_outcomes(passed=3)
 
     def test_generate_same_function_names_issue403(self, testdir):
         testdir.makepyfile("""
@@ -687,8 +679,8 @@
             test_x = make_tests()
             test_y = make_tests()
         """)
-        reprec = testdir.inline_run()
-        reprec.assertoutcome(passed=4)
+        reprec = testdir.inline_runpytest()
+        reprec.assert_outcomes(passed=4)
 
     @pytest.mark.issue463
     def test_parameterize_misspelling(self, testdir):

diff -r 156f6edf7067d395e77fa81742e56833066d3f37 -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b testing/test_assertion.py
--- a/testing/test_assertion.py
+++ b/testing/test_assertion.py
@@ -451,7 +451,7 @@
             x = 3
             assert x == 4
     """)
-    result = testdir.runpytest()
+    result = testdir.inline_runpytest()
     assert "3 == 4" in result.stdout.str()
     off_options = (("--no-assert",),
                    ("--nomagic",),

diff -r 156f6edf7067d395e77fa81742e56833066d3f37 -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b testing/test_config.py
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -39,7 +39,7 @@
             [pytest]
             minversion=9.0
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.ret != 0
         result.stderr.fnmatch_lines([
             "*tox.ini:2*requires*9.0*actual*"
@@ -75,7 +75,7 @@
             [pytest]
             addopts = --qwe
         """)
-        result = testdir.runpytest("--confcutdir=.")
+        result = testdir.inline_run("--confcutdir=.")
         assert result.ret == 0
 
 class TestConfigCmdlineParsing:
@@ -320,7 +320,7 @@
         def pytest_cmdline_preparse(args):
             args.append("-h")
     """)
-    result = testdir.runpytest()
+    result = testdir.inline_runpytest()
     result.stdout.fnmatch_lines([
         "*pytest*",
         "*-h*",
@@ -389,11 +389,11 @@
             def test_hello(fix):
                 pass
         """)
-        result = testdir.runpytest()
+        result = testdir.inline_runpytest()
         assert result.parseoutcomes()["warnings"] > 0
         assert "hello" not in result.stdout.str()
 
-        result = testdir.runpytest("-rw")
+        result = testdir.inline_runpytest("-rw")
         result.stdout.fnmatch_lines("""
             ===*warning summary*===
             *WT1*test_warn_on_test_item*:5*hello*

diff -r 156f6edf7067d395e77fa81742e56833066d3f37 -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b testing/test_nose.py
--- a/testing/test_nose.py
+++ b/testing/test_nose.py
@@ -18,10 +18,8 @@
         test_hello.setup = lambda: l.append(1)
         test_hello.teardown = lambda: l.append(2)
     """)
-    result = testdir.runpytest(p, '-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
+    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result.assert_outcomes(passed=2)
 
 
 def test_setup_func_with_setup_decorator():
@@ -65,10 +63,8 @@
             assert l == [1,2]
 
     """)
-    result = testdir.runpytest(p, '-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
+    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result.assert_outcomes(passed=2)
 
 
 def test_nose_setup_func_failure(testdir):
@@ -89,7 +85,7 @@
             assert l == [1,2]
 
     """)
-    result = testdir.runpytest(p, '-p', 'nose')
+    result = testdir.inline_runpytest(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*TypeError: <lambda>()*"
     ])
@@ -140,7 +136,7 @@
         test_hello.setup = my_setup_partial
         test_hello.teardown = my_teardown_partial
     """)
-    result = testdir.runpytest(p, '-p', 'nose')
+    result = testdir.inline_runpytest(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*"
     ])
@@ -207,7 +203,7 @@
                 #expect.append('setup')
                 eq_(self.called, expect)
     """)
-    result = testdir.runpytest(p, '-p', 'nose')
+    result = testdir.inline_runpytest(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*10 passed*"
     ])
@@ -238,7 +234,7 @@
             assert items[2] == 2
             assert 1 not in items
     """)
-    result = testdir.runpytest('-p', 'nose')
+    result = testdir.inline_runpytest('-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*",
     ])
@@ -260,7 +256,7 @@
         def test_world():
             assert l == [1]
         """)
-    result = testdir.runpytest('-p', 'nose')
+    result = testdir.inline_runpytest('-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*",
     ])
@@ -276,7 +272,7 @@
             def test_first(self):
                 pass
         """)
-    result = testdir.runpytest()
+    result = testdir.inline_runpytest()
     result.stdout.fnmatch_lines([
         "*1 passed*",
     ])
@@ -301,8 +297,8 @@
             def test_fun(self):
                 pass
         """)
-    result = testdir.runpytest()
-    result.stdout.fnmatch_lines("*1 passed*")
+    result = testdir.inline_runpytest()
+    result.assert_outcomes(passed=1)
 
 @pytest.mark.skipif("sys.version_info < (2,6)")
 def test_setup_teardown_linking_issue265(testdir):
@@ -327,8 +323,8 @@
                 """Undoes the setup."""
                 raise Exception("should not call teardown for skipped tests")
         ''')
-    reprec = testdir.inline_run()
-    reprec.assertoutcome(passed=1, skipped=1)
+    reprec = testdir.inline_runpytest()
+    reprec.assert_outcomes(passed=1, skipped=1)
 
 
 def test_SkipTest_during_collection(testdir):
@@ -338,8 +334,8 @@
         def test_failing():
             assert False
         """)
-    result = testdir.runpytest(p)
-    result.assertoutcome(skipped=1)
+    result = testdir.inline_runpytest(p)
+    result.assert_outcomes(skipped=1)
 
 
 def test_SkipTest_in_test(testdir):


https://bitbucket.org/pytest-dev/pytest/commits/dc1c8c7ea818/
Changeset:   dc1c8c7ea818
Branch:      testrefactor
User:        hpk42
Date:        2015-04-28 09:54:46+00:00
Summary:     - refine lsof checking
- make runpytest() create an inline testing process instead of
  a subprocess one
Affected #:  18 files

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 _pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -38,8 +38,11 @@
             tw.line("ERROR: could not load %s\n" % (e.path), red=True)
             return 4
         else:
-            config.pluginmanager.check_pending()
-            return config.hook.pytest_cmdline_main(config=config)
+            try:
+                config.pluginmanager.check_pending()
+                return config.hook.pytest_cmdline_main(config=config)
+            finally:
+                config._ensure_unconfigure()
     except UsageError as e:
         for msg in e.args:
             sys.stderr.write("ERROR: %s\n" %(msg,))
@@ -85,12 +88,18 @@
         if not isinstance(args, str):
             raise ValueError("not a string or argument list: %r" % (args,))
         args = shlex.split(args)
-    pluginmanager = get_config().pluginmanager
-    if plugins:
-        for plugin in plugins:
-            pluginmanager.register(plugin)
-    return pluginmanager.hook.pytest_cmdline_parse(
-            pluginmanager=pluginmanager, args=args)
+    config = get_config()
+    pluginmanager = config.pluginmanager
+    try:
+        if plugins:
+            for plugin in plugins:
+                pluginmanager.register(plugin)
+        return pluginmanager.hook.pytest_cmdline_parse(
+                pluginmanager=pluginmanager, args=args)
+    except BaseException:
+        config._ensure_unconfigure()
+        raise
+
 
 def exclude_pytest_names(name):
     return not name.startswith(name) or name == "pytest_plugins" or \
@@ -263,7 +272,10 @@
 
     def consider_pluginarg(self, arg):
         if arg.startswith("no:"):
-            self.set_blocked(arg[3:])
+            name = arg[3:]
+            self.set_blocked(name)
+            if not name.startswith("pytest_"):
+                self.set_blocked("pytest_" + name)
         else:
             self.import_plugin(arg)
 

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 _pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -1,5 +1,6 @@
 """ (disabled by default) support for testing pytest and pytest plugins. """
 import sys
+import traceback
 import os
 import codecs
 import re
@@ -287,7 +288,8 @@
             break
         self.tmpdir = tmpdir
         self.plugins = []
-        self._savesyspath = list(sys.path)
+        self._savesyspath = (list(sys.path), list(sys.meta_path))
+        self._savemodulekeys = set(sys.modules)
         self.chdir() # always chdir
         self.request.addfinalizer(self.finalize)
 
@@ -303,23 +305,23 @@
         has finished.
 
         """
-        sys.path[:] = self._savesyspath
+        sys.path[:], sys.meta_path[:] = self._savesyspath
         if hasattr(self, '_olddir'):
             self._olddir.chdir()
         self.delete_loaded_modules()
 
     def delete_loaded_modules(self):
-        """Delete modules that have been loaded from tmpdir.
+        """Delete modules that have been loaded during a test.
 
         This allows the interpreter to catch module changes in case
         the module is re-imported.
 
         """
-        for name, mod in list(sys.modules.items()):
-            if mod:
-                fn = getattr(mod, '__file__', None)
-                if fn and fn.startswith(str(self.tmpdir)):
-                    del sys.modules[name]
+        for name in set(sys.modules).difference(self._savemodulekeys):
+            # it seems zope.interfaces is keeping some state
+            # (used by twisted related tests)
+            if name != "zope.interface":
+                del sys.modules[name]
 
     def make_hook_recorder(self, pluginmanager):
         """Create a new :py:class:`HookRecorder` for a PluginManager."""
@@ -584,16 +586,27 @@
         reprec.ret = ret
         return reprec
 
-    def inline_runpytest(self, *args):
+    def inline_runpytest(self, *args, **kwargs):
         """ Return result of running pytest in-process, providing a similar
         interface to what self.runpytest() provides. """
+        if kwargs.get("syspathinsert"):
+            self.syspathinsert()
         now = time.time()
-        capture = py.io.StdCaptureFD()
+        capture = py.io.StdCapture()
         try:
-            reprec = self.inline_run(*args)
+            try:
+                reprec = self.inline_run(*args)
+            except SystemExit as e:
+                class reprec:
+                    ret = e.args[0]
+            except Exception:
+                traceback.print_exc()
+                class reprec:
+                    ret = 3
         finally:
             out, err = capture.reset()
-        assert out or err
+            sys.stdout.write(out)
+            sys.stderr.write(err)
 
         res = RunResult(reprec.ret,
                         out.split("\n"), err.split("\n"),
@@ -601,6 +614,9 @@
         res.reprec = reprec
         return res
 
+    def runpytest(self, *args, **kwargs):
+        return self.inline_runpytest(*args, **kwargs)
+
     def parseconfig(self, *args):
         """Return a new py.test Config instance from given commandline args.
 
@@ -822,7 +838,7 @@
         command = self._getsysprepend() + command
         return self.run(sys.executable, "-c", command)
 
-    def runpytest(self, *args):
+    def runpytest_subprocess(self, *args):
         """Run py.test as a subprocess with given arguments.
 
         Any plugins added to the :py:attr:`plugins` list will added

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 doc/en/example/assertion/test_failures.py
--- a/doc/en/example/assertion/test_failures.py
+++ b/doc/en/example/assertion/test_failures.py
@@ -7,7 +7,7 @@
     target = testdir.tmpdir.join(failure_demo.basename)
     failure_demo.copy(target)
     failure_demo.copy(testdir.tmpdir.join(failure_demo.basename))
-    result = testdir.runpytest(target)
+    result = testdir.runpytest(target, syspathinsert=True)
     result.stdout.fnmatch_lines([
         "*42 failed*"
     ])

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/acceptance_test.py
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -82,7 +82,7 @@
             def test_option(pytestconfig):
                 assert pytestconfig.option.xyz == "123"
         """)
-        result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123")
+        result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
         assert result.ret == 0
         result.stdout.fnmatch_lines([
             '*1 passed*',
@@ -353,7 +353,8 @@
             *unrecognized*
         """)
 
-    def test_getsourcelines_error_issue553(self, testdir):
+    def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
+        monkeypatch.setattr("inspect.getsourcelines", None)
         p = testdir.makepyfile("""
             def raise_error(obj):
                 raise IOError('source code not available')

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/conftest.py
--- a/testing/conftest.py
+++ b/testing/conftest.py
@@ -1,5 +1,6 @@
 import pytest
 import sys
+import gc
 
 pytest_plugins = "pytester",
 
@@ -17,8 +18,8 @@
 
     def _parse_lsof_output(self, out):
         def isopen(line):
-            return line.startswith('f') and (
-                "deleted" not in line and 'mem' not in line and "txt" not in line and 'cwd' not in line)
+            return line.startswith('f') and ("deleted" not in line and
+                'mem' not in line and "txt" not in line and 'cwd' not in line)
 
         open_files = []
 
@@ -32,46 +33,49 @@
 
         return open_files
 
+    def matching_platform(self):
+        try:
+            py.process.cmdexec("lsof -v")
+        except py.process.cmdexec.Error:
+            return False
+        else:
+            return True
+
+    @pytest.hookimpl_opts(hookwrapper=True, tryfirst=True)
+    def pytest_runtest_item(self, item):
+        lines1 = self.get_open_files()
+        yield
+        if hasattr(sys, "pypy_version_info"):
+            gc.collect()
+        lines2 = self.get_open_files()
+
+        new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
+        leaked_files = [t for t in lines2 if t[0] in new_fds]
+        if leaked_files:
+            error = []
+            error.append("***** %s FD leakage detected" % len(leaked_files))
+            error.extend([str(f) for f in leaked_files])
+            error.append("*** Before:")
+            error.extend([str(f) for f in lines1])
+            error.append("*** After:")
+            error.extend([str(f) for f in lines2])
+            error.append(error[0])
+            error.append("*** function %s:%s: %s " % item.location)
+            pytest.fail("\n".join(error), pytrace=False)
+
 
 def pytest_addoption(parser):
     parser.addoption('--lsof',
            action="store_true", dest="lsof", default=False,
            help=("run FD checks if lsof is available"))
 
-def pytest_runtest_setup(item):
-    config = item.config
-    config._basedir = py.path.local()
+
+def pytest_configure(config):
     if config.getvalue("lsof"):
-        try:
-            config._fd_leak_checker = LsofFdLeakChecker()
-            config._openfiles = config._fd_leak_checker.get_open_files()
-        except py.process.cmdexec.Error:
-            pass
+        checker = LsofFdLeakChecker()
+        if checker.matching_platform():
+            config.pluginmanager.register(checker)
 
-#def pytest_report_header():
-#    return "pid: %s" % os.getpid()
-
-def check_open_files(config):
-    lines2 = config._fd_leak_checker.get_open_files()
-    new_fds = set([t[0] for t in lines2]) - set([t[0] for t in config._openfiles])
-    open_files = [t for t in lines2 if t[0] in new_fds]
-    if open_files:
-        error = []
-        error.append("***** %s FD leakage detected" % len(open_files))
-        error.extend([str(f) for f in open_files])
-        error.append("*** Before:")
-        error.extend([str(f) for f in config._openfiles])
-        error.append("*** After:")
-        error.extend([str(f) for f in lines2])
-        error.append(error[0])
-        raise AssertionError("\n".join(error))
-
-@pytest.hookimpl_opts(hookwrapper=True, trylast=True)
-def pytest_runtest_teardown(item):
-    yield
-    item.config._basedir.chdir()
-    if hasattr(item.config, '_openfiles'):
-        check_open_files(item.config)
 
 # XXX copied from execnet's conftest.py - needs to be merged
 winpymap = {

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/python/collect.py
--- a/testing/python/collect.py
+++ b/testing/python/collect.py
@@ -626,10 +626,8 @@
     """))
     sub1.join("test_in_sub1.py").write("def test_1(): pass")
     sub2.join("test_in_sub2.py").write("def test_2(): pass")
-    result = testdir.inline_runpytest("-v", "-s")
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
+    result = testdir.runpytest("-v", "-s")
+    result.assert_outcomes(passed=2)
 
 def test_modulecol_roundtrip(testdir):
     modcol = testdir.getmodulecol("pass", withinit=True)

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/python/fixture.py
--- a/testing/python/fixture.py
+++ b/testing/python/fixture.py
@@ -99,10 +99,8 @@
 
         sub1.join("test_in_sub1.py").write("def test_1(arg1): pass")
         sub2.join("test_in_sub2.py").write("def test_2(arg2): pass")
-        result = testdir.inline_runpytest("-v")
-        result.stdout.fnmatch_lines([
-            "*2 passed*"
-        ])
+        result = testdir.runpytest("-v")
+        result.assert_outcomes(passed=2)
 
     def test_extend_fixture_module_class(self, testdir):
         testfile = testdir.makepyfile("""

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/python/metafunc.py
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -663,7 +663,7 @@
         """))
         sub1.join("test_in_sub1.py").write("def test_1(): pass")
         sub2.join("test_in_sub2.py").write("def test_2(): pass")
-        result = testdir.inline_runpytest("-v", "-s", sub1, sub2, sub1)
+        result = testdir.runpytest("-v", "-s", sub1, sub2, sub1)
         result.assert_outcomes(passed=3)
 
     def test_generate_same_function_names_issue403(self, testdir):

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/test_assertion.py
--- a/testing/test_assertion.py
+++ b/testing/test_assertion.py
@@ -451,7 +451,7 @@
             x = 3
             assert x == 4
     """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest()
     assert "3 == 4" in result.stdout.str()
     off_options = (("--no-assert",),
                    ("--nomagic",),
@@ -461,7 +461,7 @@
                    ("--assert=plain", "--nomagic"),
                    ("--assert=plain", "--no-assert", "--nomagic"))
     for opt in off_options:
-        result = testdir.runpytest(*opt)
+        result = testdir.runpytest_subprocess(*opt)
         assert "3 == 4" not in result.stdout.str()
 
 def test_old_assert_mode(testdir):
@@ -469,7 +469,7 @@
         def test_in_old_mode():
             assert "@py_builtins" not in globals()
     """)
-    result = testdir.runpytest("--assert=reinterp")
+    result = testdir.runpytest_subprocess("--assert=reinterp")
     assert result.ret == 0
 
 def test_triple_quoted_string_issue113(testdir):

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/test_assertrewrite.py
--- a/testing/test_assertrewrite.py
+++ b/testing/test_assertrewrite.py
@@ -453,7 +453,7 @@
                 assert not os.path.exists(__cached__)
                 assert not os.path.exists(os.path.dirname(__cached__))""")
         monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
-        assert testdir.runpytest().ret == 0
+        assert testdir.runpytest_subprocess().ret == 0
 
     @pytest.mark.skipif('"__pypy__" in sys.modules')
     def test_pyc_vs_pyo(self, testdir, monkeypatch):
@@ -615,10 +615,8 @@
         testdir.makepyfile(**contents)
         testdir.maketxtfile(**{'testpkg/resource': "Load me please."})
 
-        result = testdir.runpytest()
-        result.stdout.fnmatch_lines([
-            '* 1 passed*',
-        ])
+        result = testdir.runpytest_subprocess()
+        result.assert_outcomes(passed=1)
 
     def test_read_pyc(self, tmpdir):
         """

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/test_capture.py
--- a/testing/test_capture.py
+++ b/testing/test_capture.py
@@ -121,7 +121,7 @@
             print (sys.stdout)
             print (%s)
     """ % obj)
-    result = testdir.runpytest("--capture=%s" % method)
+    result = testdir.runpytest_subprocess("--capture=%s" % method)
     result.stdout.fnmatch_lines([
         "*1 passed*"
     ])
@@ -133,7 +133,7 @@
         def test_unicode():
             print ('b\\u00f6y')
     """)
-    result = testdir.runpytest("--capture=%s" % method)
+    result = testdir.runpytest_subprocess("--capture=%s" % method)
     result.stdout.fnmatch_lines([
         "*1 passed*"
     ])
@@ -144,7 +144,7 @@
         print ("collect %s failure" % 13)
         import xyz42123
     """)
-    result = testdir.runpytest(p)
+    result = testdir.runpytest_subprocess(p)
     result.stdout.fnmatch_lines([
         "*Captured stdout*",
         "*collect 13 failure*",
@@ -165,7 +165,7 @@
                 print ("in func2")
                 assert 0
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stdout.fnmatch_lines([
             "setup module*",
             "setup test_func1*",
@@ -188,7 +188,7 @@
             def teardown_function(func):
                 print ("in teardown")
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stdout.fnmatch_lines([
             "*test_func():*",
             "*Captured stdout during setup*",
@@ -206,7 +206,7 @@
                 print ("in func2")
                 assert 0
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         s = result.stdout.str()
         assert "in func1" not in s
         assert "in func2" in s
@@ -222,7 +222,7 @@
                 print ("in func1")
                 pass
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stdout.fnmatch_lines([
             '*teardown_function*',
             '*Captured stdout*',
@@ -240,7 +240,7 @@
             def test_func():
                 pass
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stdout.fnmatch_lines([
             "*def teardown_module(mod):*",
             "*Captured stdout*",
@@ -259,7 +259,7 @@
                 sys.stderr.write(str(2))
                 raise ValueError
         """)
-        result = testdir.runpytest(p1)
+        result = testdir.runpytest_subprocess(p1)
         result.stdout.fnmatch_lines([
             "*test_capturing_outerr.py .F",
             "====* FAILURES *====",
@@ -282,7 +282,7 @@
                 logging.basicConfig(stream=stream)
                 stream.close() # to free memory/release resources
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stderr.str().find("atexit") == -1
 
     def test_logging_and_immediate_setupteardown(self, testdir):
@@ -301,7 +301,7 @@
         """)
         for optargs in (('--capture=sys',), ('--capture=fd',)):
             print (optargs)
-            result = testdir.runpytest(p, *optargs)
+            result = testdir.runpytest_subprocess(p, *optargs)
             s = result.stdout.str()
             result.stdout.fnmatch_lines([
                 "*WARN*hello3",  # errors show first!
@@ -327,7 +327,7 @@
         """)
         for optargs in (('--capture=sys',), ('--capture=fd',)):
             print (optargs)
-            result = testdir.runpytest(p, *optargs)
+            result = testdir.runpytest_subprocess(p, *optargs)
             s = result.stdout.str()
             result.stdout.fnmatch_lines([
                 "*WARN*hello3",  # errors come first
@@ -348,7 +348,7 @@
                 logging.warn("hello432")
                 assert 0
         """)
-        result = testdir.runpytest(
+        result = testdir.runpytest_subprocess(
             p, "--traceconfig",
             "-p", "no:capturelog")
         assert result.ret != 0
@@ -364,7 +364,7 @@
                 logging.warn("hello435")
         """)
         # make sure that logging is still captured in tests
-        result = testdir.runpytest("-s", "-p", "no:capturelog")
+        result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
         assert result.ret == 0
         result.stderr.fnmatch_lines([
             "WARNING*hello435*",
@@ -383,7 +383,7 @@
                 logging.warn("hello433")
                 assert 0
         """)
-        result = testdir.runpytest(p, "-p", "no:capturelog")
+        result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "WARNING*hello433*",
@@ -410,7 +410,7 @@
             def test_two(capfd, capsys):
                 pass
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stdout.fnmatch_lines([
             "*ERROR*setup*test_one*",
             "*capsys*capfd*same*time*",
@@ -425,7 +425,7 @@
                 print ("xxx42xxx")
                 assert 0
         """ % method)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stdout.fnmatch_lines([
             "xxx42xxx",
         ])
@@ -447,7 +447,7 @@
             def test_hello(capsys, missingarg):
                 pass
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stdout.fnmatch_lines([
             "*test_partial_setup_failure*",
             "*1 error*",
@@ -461,7 +461,7 @@
                 os.write(1, str(42).encode('ascii'))
                 raise KeyboardInterrupt()
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stdout.fnmatch_lines([
             "*KeyboardInterrupt*"
         ])
@@ -474,7 +474,7 @@
             def test_log(capsys):
                 logging.error('x')
             """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         assert 'closed' not in result.stderr.str()
 
 
@@ -485,7 +485,7 @@
             raise ValueError(42)
     """))
     sub1.join("test_mod.py").write("def test_func1(): pass")
-    result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
+    result = testdir.runpytest_subprocess(testdir.tmpdir, '--traceconfig')
     result.stdout.fnmatch_lines([
         "*ValueError(42)*",
         "*1 error*"
@@ -500,7 +500,7 @@
         def test_hello(capfd):
             pass
     """)
-    result = testdir.runpytest("--capture=no")
+    result = testdir.runpytest_subprocess("--capture=no")
     result.stdout.fnmatch_lines([
         "*1 skipped*"
     ])
@@ -512,7 +512,7 @@
             print ("hello19")
     """)
     testdir.makepyfile("def test_func(): pass")
-    result = testdir.runpytest()
+    result = testdir.runpytest_subprocess()
     assert result.ret == 0
     assert 'hello19' not in result.stdout.str()
 
@@ -526,7 +526,7 @@
             os.write(1, omg)
             assert 0
         """)
-    result = testdir.runpytest('--cap=fd')
+    result = testdir.runpytest_subprocess('--cap=fd')
     result.stdout.fnmatch_lines('''
         *def test_func*
         *assert 0*
@@ -541,7 +541,7 @@
             print ("hello19")
     """)
     testdir.makepyfile("def test_func(): pass")
-    result = testdir.runpytest("-vs")
+    result = testdir.runpytest_subprocess("-vs")
     assert result.ret == 0
     assert 'hello19' in result.stdout.str()
 
@@ -562,7 +562,7 @@
         if __name__ == '__main__':
             test_foo()
         """)
-    result = testdir.runpytest('--assert=plain')
+    result = testdir.runpytest_subprocess('--assert=plain')
     result.stdout.fnmatch_lines([
         '*2 passed*',
     ])
@@ -885,7 +885,7 @@
                 os.write(1, "hello\\n".encode("ascii"))
                 assert 0
         """)
-        result = testdir.runpytest()
+        result = testdir.runpytest_subprocess()
         result.stdout.fnmatch_lines("""
             *test_x*
             *assert 0*
@@ -936,7 +936,7 @@
                 cap = StdCaptureFD(out=False, err=False, in_=True)
                 cap.stop_capturing()
         """)
-        result = testdir.runpytest("--capture=fd")
+        result = testdir.runpytest_subprocess("--capture=fd")
         assert result.ret == 0
         assert result.parseoutcomes()['passed'] == 3
 
@@ -971,7 +971,7 @@
             os.write(1, b"hello\\n")
             assert 0
     """)
-    result = testdir.runpytest()
+    result = testdir.runpytest_subprocess()
     result.stdout.fnmatch_lines("""
         *test_capture_again*
         *assert 0*

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/test_collection.py
--- a/testing/test_collection.py
+++ b/testing/test_collection.py
@@ -296,7 +296,6 @@
         subdir.ensure("__init__.py")
         target = subdir.join(p.basename)
         p.move(target)
-        testdir.chdir()
         subdir.chdir()
         config = testdir.parseconfig(p.basename)
         rcol = Session(config=config)
@@ -470,7 +469,6 @@
             assert col.config is config
 
     def test_pkgfile(self, testdir):
-        testdir.chdir()
         tmpdir = testdir.tmpdir
         subdir = tmpdir.join("subdir")
         x = subdir.ensure("x.py")

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/test_core.py
--- a/testing/test_core.py
+++ b/testing/test_core.py
@@ -961,7 +961,7 @@
         """)
         p.copy(p.dirpath("skipping2.py"))
         monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
-        result = testdir.runpytest("-rw", "-p", "skipping1", "--traceconfig")
+        result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True)
         assert result.ret == 0
         result.stdout.fnmatch_lines([
             "WI1*skipped plugin*skipping1*hello*",
@@ -990,7 +990,7 @@
                 assert plugin is not None
         """)
         monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",")
-        result = testdir.runpytest(p)
+        result = testdir.runpytest(p, syspathinsert=True)
         assert result.ret == 0
         result.stdout.fnmatch_lines(["*1 passed*"])
 

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/test_doctest.py
--- a/testing/test_doctest.py
+++ b/testing/test_doctest.py
@@ -75,8 +75,6 @@
             assert isinstance(items[0].parent, DoctestModule)
             assert items[0].parent is items[1].parent
 
-    @pytest.mark.xfail('hasattr(sys, "pypy_version_info")', reason=
-                       "pypy leaks one FD")
     def test_simple_doctestfile(self, testdir):
         p = testdir.maketxtfile(test_doc="""
             >>> x = 1

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/test_genscript.py
--- a/testing/test_genscript.py
+++ b/testing/test_genscript.py
@@ -16,7 +16,6 @@
         assert self.script.check()
 
     def run(self, anypython, testdir, *args):
-        testdir.chdir()
         return testdir._run(anypython, self.script, *args)
 
 def test_gen(testdir, anypython, standalone):

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/test_helpconfig.py
--- a/testing/test_helpconfig.py
+++ b/testing/test_helpconfig.py
@@ -53,14 +53,14 @@
     ])
 
 def test_debug(testdir, monkeypatch):
-    result = testdir.runpytest("--debug")
+    result = testdir.runpytest_subprocess("--debug")
     assert result.ret == 0
     p = testdir.tmpdir.join("pytestdebug.log")
     assert "pytest_sessionstart" in p.read()
 
 def test_PYTEST_DEBUG(testdir, monkeypatch):
     monkeypatch.setenv("PYTEST_DEBUG", "1")
-    result = testdir.runpytest()
+    result = testdir.runpytest_subprocess()
     assert result.ret == 0
     result.stderr.fnmatch_lines([
         "*pytest_plugin_registered*",

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/test_pdb.py
--- a/testing/test_pdb.py
+++ b/testing/test_pdb.py
@@ -260,7 +260,7 @@
 
     def test_pdb_collection_failure_is_shown(self, testdir):
         p1 = testdir.makepyfile("""xxx """)
-        result = testdir.runpytest("--pdb", p1)
+        result = testdir.runpytest_subprocess("--pdb", p1)
         result.stdout.fnmatch_lines([
             "*NameError*xxx*",
             "*1 error*",

diff -r d7f0b42aa8240f6031ca6b45e5a6a8a491060d2b -r dc1c8c7ea818036073356c01ed74608f0671fc73 testing/test_session.py
--- a/testing/test_session.py
+++ b/testing/test_session.py
@@ -203,7 +203,6 @@
 
 
 def test_plugin_specify(testdir):
-    testdir.chdir()
     pytest.raises(ImportError, """
             testdir.parseconfig("-p", "nqweotexistent")
     """)


https://bitbucket.org/pytest-dev/pytest/commits/390fe4614bd4/
Changeset:   390fe4614bd4
Branch:      testrefactor
User:        hpk42
Date:        2015-04-28 09:54:53+00:00
Summary:     streamline pytester API majorly:

- integrate conftest into pytester plugin
- introduce runpytest() to either call runpytest_inline (default) or
  runpytest_subprocess (python -m pytest)
- move testdir.inline_runsource1 to pdb tests
- strike some unnecessary methods.
- a new section "writing plugins" and some better pytester docs
Affected #:  17 files

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -43,6 +43,14 @@
   implementations.  Use the ``hookwrapper`` mechanism instead already 
   introduced with pytest-2.7.
 
+- speed up pytest's own test suite considerably by using inprocess
+  tests by default (testrun can be modified with --runpytest=subprocess
+  to create subprocesses in many places instead).  The main
+  APIs to run pytest in a test is "runpytest()" or "runpytest_subprocess"
+  and "runpytest_inprocess" if you need a particular way of running
+  the test.  In all cases you get back a RunResult but the inprocess
+  one will also have a "reprec" attribute with the recorded events/reports.
+
  
 2.7.1.dev (compared to 2.7.0)
 -----------------------------

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee _pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -1,4 +1,5 @@
 """ (disabled by default) support for testing pytest and pytest plugins. """
+import gc
 import sys
 import traceback
 import os
@@ -16,6 +17,136 @@
 
 from _pytest.main import Session, EXIT_OK
 
+
+def pytest_addoption(parser):
+    # group = parser.getgroup("pytester", "pytester (self-tests) options")
+    parser.addoption('--lsof',
+           action="store_true", dest="lsof", default=False,
+           help=("run FD checks if lsof is available"))
+
+    parser.addoption('--runpytest', default="inprocess", dest="runpytest",
+           choices=("inprocess", "subprocess", ),
+           help=("run pytest sub runs in tests using an 'inprocess' "
+                 "or 'subprocess' (python -m main) method"))
+
+
+def pytest_configure(config):
+    # This might be called multiple times. Only take the first.
+    global _pytest_fullpath
+    try:
+        _pytest_fullpath
+    except NameError:
+        _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
+
+    if config.getvalue("lsof"):
+        checker = LsofFdLeakChecker()
+        if checker.matching_platform():
+            config.pluginmanager.register(checker)
+
+
+class LsofFdLeakChecker(object):
+    def get_open_files(self):
+        out = self._exec_lsof()
+        open_files = self._parse_lsof_output(out)
+        return open_files
+
+    def _exec_lsof(self):
+        pid = os.getpid()
+        return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
+
+    def _parse_lsof_output(self, out):
+        def isopen(line):
+            return line.startswith('f') and ("deleted" not in line and
+                'mem' not in line and "txt" not in line and 'cwd' not in line)
+
+        open_files = []
+
+        for line in out.split("\n"):
+            if isopen(line):
+                fields = line.split('\0')
+                fd = fields[0][1:]
+                filename = fields[1][1:]
+                if filename.startswith('/'):
+                    open_files.append((fd, filename))
+
+        return open_files
+
+    def matching_platform(self):
+        try:
+            py.process.cmdexec("lsof -v")
+        except py.process.cmdexec.Error:
+            return False
+        else:
+            return True
+
+    @pytest.hookimpl_opts(hookwrapper=True, tryfirst=True)
+    def pytest_runtest_item(self, item):
+        lines1 = self.get_open_files()
+        yield
+        if hasattr(sys, "pypy_version_info"):
+            gc.collect()
+        lines2 = self.get_open_files()
+
+        new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
+        leaked_files = [t for t in lines2 if t[0] in new_fds]
+        if leaked_files:
+            error = []
+            error.append("***** %s FD leakage detected" % len(leaked_files))
+            error.extend([str(f) for f in leaked_files])
+            error.append("*** Before:")
+            error.extend([str(f) for f in lines1])
+            error.append("*** After:")
+            error.extend([str(f) for f in lines2])
+            error.append(error[0])
+            error.append("*** function %s:%s: %s " % item.location)
+            pytest.fail("\n".join(error), pytrace=False)
+
+
+# XXX copied from execnet's conftest.py - needs to be merged
+winpymap = {
+    'python2.7': r'C:\Python27\python.exe',
+    'python2.6': r'C:\Python26\python.exe',
+    'python3.1': r'C:\Python31\python.exe',
+    'python3.2': r'C:\Python32\python.exe',
+    'python3.3': r'C:\Python33\python.exe',
+    'python3.4': r'C:\Python34\python.exe',
+    'python3.5': r'C:\Python35\python.exe',
+}
+
+def getexecutable(name, cache={}):
+    try:
+        return cache[name]
+    except KeyError:
+        executable = py.path.local.sysfind(name)
+        if executable:
+            if name == "jython":
+                import subprocess
+                popen = subprocess.Popen([str(executable), "--version"],
+                    universal_newlines=True, stderr=subprocess.PIPE)
+                out, err = popen.communicate()
+                if not err or "2.5" not in err:
+                    executable = None
+                if "2.5.2" in err:
+                    executable = None # http://bugs.jython.org/issue1790
+        cache[name] = executable
+        return executable
+
+ at pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
+                        'pypy', 'pypy3'])
+def anypython(request):
+    name = request.param
+    executable = getexecutable(name)
+    if executable is None:
+        if sys.platform == "win32":
+            executable = winpymap.get(name, None)
+            if executable:
+                executable = py.path.local(executable)
+                if executable.check():
+                    return executable
+        pytest.skip("no suitable %s found" % (name,))
+    return executable
+
 # used at least by pytest-xdist plugin
 @pytest.fixture
 def _pytest(request):
@@ -40,23 +171,6 @@
     return [x for x in l if x[0] != "_"]
 
 
-def pytest_addoption(parser):
-    group = parser.getgroup("pylib")
-    group.addoption('--no-tools-on-path',
-           action="store_true", dest="notoolsonpath", default=False,
-           help=("discover tools on PATH instead of going through py.cmdline.")
-    )
-
-def pytest_configure(config):
-    # This might be called multiple times. Only take the first.
-    global _pytest_fullpath
-    try:
-        _pytest_fullpath
-    except NameError:
-        _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
-        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
-
-
 class ParsedCall:
     def __init__(self, name, kwargs):
         self.__dict__.update(kwargs)
@@ -202,7 +316,7 @@
     return LineMatcher
 
 def pytest_funcarg__testdir(request):
-    tmptestdir = TmpTestdir(request)
+    tmptestdir = Testdir(request)
     return tmptestdir
 
 
@@ -216,10 +330,10 @@
     :ret: The return value.
     :outlines: List of lines captured from stdout.
     :errlines: List of lines captures from stderr.
-    :stdout: LineMatcher of stdout, use ``stdout.str()`` to
+    :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
        reconstruct stdout or the commonly used
        ``stdout.fnmatch_lines()`` method.
-    :stderrr: LineMatcher of stderr.
+    :stderrr: :py:class:`LineMatcher` of stderr.
     :duration: Duration in seconds.
 
     """
@@ -253,7 +367,7 @@
 
 
 
-class TmpTestdir:
+class Testdir:
     """Temporary test directory with tools to test/run py.test itself.
 
     This is based on the ``tmpdir`` fixture but provides a number of
@@ -276,7 +390,6 @@
 
     def __init__(self, request):
         self.request = request
-        self.Config = request.config.__class__
         # XXX remove duplication with tmpdir plugin
         basetmp = request.config._tmpdirhandler.ensuretemp("testdir")
         name = request.function.__name__
@@ -292,9 +405,14 @@
         self._savemodulekeys = set(sys.modules)
         self.chdir() # always chdir
         self.request.addfinalizer(self.finalize)
+        method = self.request.config.getoption("--runpytest")
+        if method == "inprocess":
+            self._runpytest_method = self.runpytest_inprocess
+        elif method == "subprocess":
+            self._runpytest_method = self.runpytest_subprocess
 
     def __repr__(self):
-        return "<TmpTestdir %r>" % (self.tmpdir,)
+        return "<Testdir %r>" % (self.tmpdir,)
 
     def finalize(self):
         """Clean up global state artifacts.
@@ -315,7 +433,6 @@
 
         This allows the interpreter to catch module changes in case
         the module is re-imported.
-
         """
         for name in set(sys.modules).difference(self._savemodulekeys):
             # it seems zope.interfaces is keeping some state
@@ -512,43 +629,19 @@
         l = list(cmdlineargs) + [p]
         return self.inline_run(*l)
 
-    def inline_runsource1(self, *args):
-        """Run a test module in process using ``pytest.main()``.
-
-        This behaves exactly like :py:meth:`inline_runsource` and
-        takes identical arguments.  However the return value is a list
-        of the reports created by the pytest_runtest_logreport hook
-        during the run.
-
-        """
-        args = list(args)
-        source = args.pop()
-        p = self.makepyfile(source)
-        l = list(args) + [p]
-        reprec = self.inline_run(*l)
-        reports = reprec.getreports("pytest_runtest_logreport")
-        assert len(reports) == 3, reports # setup/call/teardown
-        return reports[1]
-
     def inline_genitems(self, *args):
         """Run ``pytest.main(['--collectonly'])`` in-process.
 
         Retuns a tuple of the collected items and a
         :py:class:`HookRecorder` instance.
 
-        """
-        return self.inprocess_run(list(args) + ['--collectonly'])
-
-    def inprocess_run(self, args, plugins=()):
-        """Run ``pytest.main()`` in-process, return Items and a HookRecorder.
-
         This runs the :py:func:`pytest.main` function to run all of
         py.test inside the test process itself like
         :py:meth:`inline_run`.  However the return value is a tuple of
         the collection items and a :py:class:`HookRecorder` instance.
 
         """
-        rec = self.inline_run(*args, plugins=plugins)
+        rec = self.inline_run("--collect-only", *args)
         items = [x.item for x in rec.getcalls("pytest_itemcollected")]
         return items, rec
 
@@ -586,7 +679,7 @@
         reprec.ret = ret
         return reprec
 
-    def inline_runpytest(self, *args, **kwargs):
+    def runpytest_inprocess(self, *args, **kwargs):
         """ Return result of running pytest in-process, providing a similar
         interface to what self.runpytest() provides. """
         if kwargs.get("syspathinsert"):
@@ -615,7 +708,11 @@
         return res
 
     def runpytest(self, *args, **kwargs):
-        return self.inline_runpytest(*args, **kwargs)
+        """ Run pytest inline or in a subprocess, depending on the command line
+        option "--runpytest" and return a :py:class:`RunResult`.
+
+        """
+        return self._runpytest_method(*args, **kwargs)
 
     def parseconfig(self, *args):
         """Return a new py.test Config instance from given commandline args.
@@ -788,57 +885,23 @@
         except UnicodeEncodeError:
             print("couldn't print to %s because of encoding" % (fp,))
 
-    def runpybin(self, scriptname, *args):
-        """Run a py.* tool with arguments.
+    def _getpytestargs(self):
+        # we cannot use "(sys.executable,script)"
+        # because on windows the script is e.g. a py.test.exe
+        return (sys.executable, _pytest_fullpath,) # noqa
 
-        This can realy only be used to run py.test, you probably want
-            :py:meth:`runpytest` instead.
+    def runpython(self, script):
+        """Run a python script using sys.executable as interpreter.
 
         Returns a :py:class:`RunResult`.
-
         """
-        fullargs = self._getpybinargs(scriptname) + args
-        return self.run(*fullargs)
-
-    def _getpybinargs(self, scriptname):
-        if not self.request.config.getvalue("notoolsonpath"):
-            # XXX we rely on script referring to the correct environment
-            # we cannot use "(sys.executable,script)"
-            # because on windows the script is e.g. a py.test.exe
-            return (sys.executable, _pytest_fullpath,) # noqa
-        else:
-            pytest.skip("cannot run %r with --no-tools-on-path" % scriptname)
-
-    def runpython(self, script, prepend=True):
-        """Run a python script.
-
-        If ``prepend`` is True then the directory from which the py
-        package has been imported will be prepended to sys.path.
-
-        Returns a :py:class:`RunResult`.
-
-        """
-        # XXX The prepend feature is probably not very useful since the
-        #     split of py and pytest.
-        if prepend:
-            s = self._getsysprepend()
-            if s:
-                script.write(s + "\n" + script.read())
         return self.run(sys.executable, script)
 
-    def _getsysprepend(self):
-        if self.request.config.getvalue("notoolsonpath"):
-            s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath())
-        else:
-            s = ""
-        return s
-
     def runpython_c(self, command):
         """Run python -c "command", return a :py:class:`RunResult`."""
-        command = self._getsysprepend() + command
         return self.run(sys.executable, "-c", command)
 
-    def runpytest_subprocess(self, *args):
+    def runpytest_subprocess(self, *args, **kwargs):
         """Run py.test as a subprocess with given arguments.
 
         Any plugins added to the :py:attr:`plugins` list will added
@@ -863,7 +926,8 @@
         plugins = [x for x in self.plugins if isinstance(x, str)]
         if plugins:
             args = ('-p', plugins[0]) + args
-        return self.runpybin("py.test", *args)
+        args = self._getpytestargs() + args
+        return self.run(*args)
 
     def spawn_pytest(self, string, expect_timeout=10.0):
         """Run py.test using pexpect.
@@ -874,10 +938,8 @@
         The pexpect child is returned.
 
         """
-        if self.request.config.getvalue("notoolsonpath"):
-            pytest.skip("--no-tools-on-path prevents running pexpect-spawn tests")
         basetemp = self.tmpdir.mkdir("pexpect")
-        invoke = " ".join(map(str, self._getpybinargs("py.test")))
+        invoke = " ".join(map(str, self._getpytestargs()))
         cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
         return self.spawn(cmd, expect_timeout=expect_timeout)
 

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee doc/en/writing_plugins.txt
--- a/doc/en/writing_plugins.txt
+++ b/doc/en/writing_plugins.txt
@@ -186,12 +186,44 @@
 If you want to look at the names of existing plugins, use
 the ``--traceconfig`` option.
 
+Testing plugins
+---------------
+
+pytest comes with some facilities that you can enable for testing your
+plugin.  Given that you have an installed plugin you can enable the
+:py:class:`testdir <_pytest.pytester.Testdir>` fixture via specifying a
+command line option to include the pytester plugin (``-p pytester``) or
+by putting ``pytest_plugins = pytester`` into your test or
+``conftest.py`` file.  You then will have a ``testdir`` fixure which you
+can use like this::
+
+    # content of test_myplugin.py
+
+    pytest_plugins = pytester  # to get testdir fixture
+
+    def test_myplugin(testdir):
+        testdir.makepyfile("""
+            def test_example():
+                pass
+        """)
+        result = testdir.runpytest("--verbose")
+        result.fnmatch_lines("""
+            test_example*
+        """)
+
+Note that by default ``testdir.runpytest()`` will perform a pytest
+in-process.  You can pass the command line option ``--runpytest=subprocess``
+to have it happen in a subprocess.
+
+Also see the :py:class:`RunResult <_pytest.pytester.RunResult>` for more
+methods of the result object that you get from a call to ``runpytest``.
 
 .. _`writinghooks`:
 
 Writing hook functions
 ======================
 
+
 .. _validation:
 
 hook function validation and execution
@@ -493,3 +525,13 @@
 .. autoclass:: _pytest.core.CallOutcome()
     :members:
 
+.. currentmodule:: _pytest.pytester
+
+.. autoclass:: Testdir()
+    :members: runpytest,runpytest_subprocess,runpytest_inprocess,makeconftest,makepyfile
+
+.. autoclass:: RunResult()
+    :members:
+
+.. autoclass:: LineMatcher()
+    :members:

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/acceptance_test.py
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -203,7 +203,7 @@
             os.chdir(os.path.dirname(os.getcwd()))
             print (py.log)
         """))
-        result = testdir.runpython(p, prepend=False)
+        result = testdir.runpython(p)
         assert not result.ret
 
     def test_issue109_sibling_conftests_not_loaded(self, testdir):

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/conftest.py
--- a/testing/conftest.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import pytest
-import sys
-import gc
-
-pytest_plugins = "pytester",
-
-import os, py
-
-class LsofFdLeakChecker(object):
-    def get_open_files(self):
-        out = self._exec_lsof()
-        open_files = self._parse_lsof_output(out)
-        return open_files
-
-    def _exec_lsof(self):
-        pid = os.getpid()
-        return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
-
-    def _parse_lsof_output(self, out):
-        def isopen(line):
-            return line.startswith('f') and ("deleted" not in line and
-                'mem' not in line and "txt" not in line and 'cwd' not in line)
-
-        open_files = []
-
-        for line in out.split("\n"):
-            if isopen(line):
-                fields = line.split('\0')
-                fd = fields[0][1:]
-                filename = fields[1][1:]
-                if filename.startswith('/'):
-                    open_files.append((fd, filename))
-
-        return open_files
-
-    def matching_platform(self):
-        try:
-            py.process.cmdexec("lsof -v")
-        except py.process.cmdexec.Error:
-            return False
-        else:
-            return True
-
-    @pytest.hookimpl_opts(hookwrapper=True, tryfirst=True)
-    def pytest_runtest_item(self, item):
-        lines1 = self.get_open_files()
-        yield
-        if hasattr(sys, "pypy_version_info"):
-            gc.collect()
-        lines2 = self.get_open_files()
-
-        new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
-        leaked_files = [t for t in lines2 if t[0] in new_fds]
-        if leaked_files:
-            error = []
-            error.append("***** %s FD leakage detected" % len(leaked_files))
-            error.extend([str(f) for f in leaked_files])
-            error.append("*** Before:")
-            error.extend([str(f) for f in lines1])
-            error.append("*** After:")
-            error.extend([str(f) for f in lines2])
-            error.append(error[0])
-            error.append("*** function %s:%s: %s " % item.location)
-            pytest.fail("\n".join(error), pytrace=False)
-
-
-def pytest_addoption(parser):
-    parser.addoption('--lsof',
-           action="store_true", dest="lsof", default=False,
-           help=("run FD checks if lsof is available"))
-
-
-def pytest_configure(config):
-    if config.getvalue("lsof"):
-        checker = LsofFdLeakChecker()
-        if checker.matching_platform():
-            config.pluginmanager.register(checker)
-
-
-# XXX copied from execnet's conftest.py - needs to be merged
-winpymap = {
-    'python2.7': r'C:\Python27\python.exe',
-    'python2.6': r'C:\Python26\python.exe',
-    'python3.1': r'C:\Python31\python.exe',
-    'python3.2': r'C:\Python32\python.exe',
-    'python3.3': r'C:\Python33\python.exe',
-    'python3.4': r'C:\Python34\python.exe',
-    'python3.5': r'C:\Python35\python.exe',
-}
-
-def getexecutable(name, cache={}):
-    try:
-        return cache[name]
-    except KeyError:
-        executable = py.path.local.sysfind(name)
-        if executable:
-            if name == "jython":
-                import subprocess
-                popen = subprocess.Popen([str(executable), "--version"],
-                    universal_newlines=True, stderr=subprocess.PIPE)
-                out, err = popen.communicate()
-                if not err or "2.5" not in err:
-                    executable = None
-                if "2.5.2" in err:
-                    executable = None # http://bugs.jython.org/issue1790
-        cache[name] = executable
-        return executable
-
- at pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
-                        'pypy', 'pypy3'])
-def anypython(request):
-    name = request.param
-    executable = getexecutable(name)
-    if executable is None:
-        if sys.platform == "win32":
-            executable = winpymap.get(name, None)
-            if executable:
-                executable = py.path.local(executable)
-                if executable.check():
-                    return executable
-        pytest.skip("no suitable %s found" % (name,))
-    return executable

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/python/collect.py
--- a/testing/python/collect.py
+++ b/testing/python/collect.py
@@ -15,7 +15,7 @@
         p.pyimport()
         del py.std.sys.modules['test_whatever']
         b.ensure("test_whatever.py")
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines([
             "*import*mismatch*",
             "*imported*test_whatever*",
@@ -59,7 +59,7 @@
                 def __init__(self):
                     pass
         """)
-        result = testdir.inline_runpytest("-rw")
+        result = testdir.runpytest_inprocess("-rw")
         result.stdout.fnmatch_lines_random("""
             WC1*test_class_with_init_warning.py*__init__*
         """)
@@ -69,7 +69,7 @@
             class test(object):
                 pass
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines([
             "*collected 0*",
         ])
@@ -86,7 +86,7 @@
                 def teardown_class(cls):
                     pass
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines([
             "*1 passed*",
         ])
@@ -534,7 +534,7 @@
         """)
         testdir.makepyfile("def test_some(): pass")
         testdir.makepyfile(test_xyz="def test_func(): pass")
-        result = testdir.inline_runpytest("--collect-only")
+        result = testdir.runpytest_inprocess("--collect-only")
         result.stdout.fnmatch_lines([
             "*<Module*test_pytest*",
             "*<MyModule*xyz*",
@@ -590,7 +590,7 @@
                     return MyFunction(name, collector)
         """)
         testdir.makepyfile("def some(): pass")
-        result = testdir.inline_runpytest("--collect-only")
+        result = testdir.runpytest_inprocess("--collect-only")
         result.stdout.fnmatch_lines([
             "*MyFunction*some*",
         ])
@@ -648,7 +648,7 @@
                 raise ValueError("xyz")
         """)
         p = testdir.makepyfile("def test(hello): pass")
-        result = testdir.inline_runpytest(p)
+        result = testdir.runpytest_inprocess(p)
         assert result.ret != 0
         out = result.stdout.str()
         assert out.find("xyz") != -1
@@ -656,7 +656,7 @@
         numentries = out.count("_ _ _") # separator for traceback entries
         assert numentries == 0
 
-        result = testdir.inline_runpytest("--fulltrace", p)
+        result = testdir.runpytest_inprocess("--fulltrace", p)
         out = result.stdout.str()
         assert out.find("conftest.py:2: ValueError") != -1
         numentries = out.count("_ _ _ _") # separator for traceback entries
@@ -669,7 +669,7 @@
             x = 17
             asd
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.ret != 0
         out = result.stdout.str()
         assert "x = 1" not in out
@@ -678,7 +678,7 @@
             " *asd*",
             "E*NameError*",
         ])
-        result = testdir.inline_runpytest("--fulltrace")
+        result = testdir.runpytest_inprocess("--fulltrace")
         out = result.stdout.str()
         assert "x = 1" in out
         assert "x = 2" in out
@@ -769,7 +769,7 @@
     """)
     p2 = p.new(basename=p.basename.replace("test", "check"))
     p.move(p2)
-    result = testdir.inline_runpytest("--collect-only", "-s")
+    result = testdir.runpytest_inprocess("--collect-only", "-s")
     result.stdout.fnmatch_lines([
         "*check_customized*",
         "*check_simple*",
@@ -777,7 +777,7 @@
         "*check_meth*",
     ])
 
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
     assert result.ret == 0
     result.stdout.fnmatch_lines([
         "*2 passed*",
@@ -793,12 +793,12 @@
         def _test_underscore():
             pass
     """)
-    result = testdir.inline_runpytest("--collect-only", "-s")
+    result = testdir.runpytest_inprocess("--collect-only", "-s")
     result.stdout.fnmatch_lines([
         "*_test_underscore*",
     ])
 
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
     assert result.ret == 0
     result.stdout.fnmatch_lines([
         "*1 passed*",
@@ -818,7 +818,7 @@
          def test_hello():
             pass
     """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
     result.stdout.fnmatch_lines([
         "*1 passed*",
     ])
@@ -842,7 +842,7 @@
             def test_hello(self):
                 pass
     """)
-    result = testdir.inline_runpytest("--collect-only")
+    result = testdir.runpytest_inprocess("--collect-only")
     result.stdout.fnmatch_lines([
         "*MyClass*",
         "*MyInstance*",
@@ -862,6 +862,6 @@
             return Test
         TestFoo = make_test()
     """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
     assert "TypeError" not in result.stdout.str()
     assert result.ret == 0

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/python/fixture.py
--- a/testing/python/fixture.py
+++ b/testing/python/fixture.py
@@ -33,7 +33,7 @@
             def test_func(some):
                 pass
         """)
-        result = testdir.inline_runpytest() # "--collect-only")
+        result = testdir.runpytest_inprocess() # "--collect-only")
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*def test_func(some)*",
@@ -78,7 +78,7 @@
                 def test_method(self, something):
                     assert something is self
         """)
-        result = testdir.inline_runpytest(p)
+        result = testdir.runpytest_inprocess(p)
         result.stdout.fnmatch_lines([
             "*1 passed*"
         ])
@@ -119,9 +119,9 @@
                  def test_spam(self, spam):
                      assert spam == 'spamspam'
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_extend_fixture_conftest_module(self, testdir):
@@ -142,9 +142,9 @@
             def test_spam(spam):
                 assert spam == 'spamspam'
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_extend_fixture_conftest_conftest(self, testdir):
@@ -168,9 +168,9 @@
             def test_spam(spam):
                 assert spam == "spamspam"
         """))
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_extend_fixture_conftest_plugin(self, testdir):
@@ -195,7 +195,7 @@
             def test_foo(foo):
                 assert foo == 14
         """)
-        result = testdir.inline_runpytest('-s')
+        result = testdir.runpytest_inprocess('-s')
         assert result.ret == 0
 
     def test_extend_fixture_plugin_plugin(self, testdir):
@@ -221,7 +221,7 @@
             def test_foo(foo):
                 assert foo == 14
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.ret == 0
 
     def test_override_parametrized_fixture_conftest_module(self, testdir):
@@ -243,9 +243,9 @@
             def test_spam(spam):
                 assert spam == 'spam'
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_override_parametrized_fixture_conftest_conftest(self, testdir):
@@ -270,9 +270,9 @@
             def test_spam(spam):
                 assert spam == "spam"
         """))
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_override_non_parametrized_fixture_conftest_module(self, testdir):
@@ -297,9 +297,9 @@
                 assert spam == params['spam']
                 params['spam'] += 1
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines(["*3 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
         result.stdout.fnmatch_lines(["*3 passed*"])
 
     def test_override_non_parametrized_fixture_conftest_conftest(self, testdir):
@@ -327,9 +327,9 @@
                 assert spam == params['spam']
                 params['spam'] += 1
         """))
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines(["*3 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
         result.stdout.fnmatch_lines(["*3 passed*"])
 
     def test_autouse_fixture_plugin(self, testdir):
@@ -349,7 +349,7 @@
             def test_foo(request):
                 assert request.function.foo == 7
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.ret == 0
 
     def test_funcarg_lookup_error(self, testdir):
@@ -357,7 +357,7 @@
             def test_lookup_error(unknown):
                 pass
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines([
             "*ERROR*test_lookup_error*",
             "*def test_lookup_error(unknown):*",
@@ -386,7 +386,7 @@
                     traceback.print_exc()
                 assert sys.exc_info() == (None, None, None)
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.ret == 0
 
 
@@ -529,7 +529,7 @@
             def test_second():
                 assert len(l) == 1
         """)
-        result = testdir.inline_runpytest(p)
+        result = testdir.runpytest_inprocess(p)
         result.stdout.fnmatch_lines([
             "*1 error*"  # XXX the whole module collection fails
             ])
@@ -614,7 +614,7 @@
         """))
         p = b.join("test_module.py")
         p.write("def test_func(arg1): pass")
-        result = testdir.inline_runpytest(p, "--fixtures")
+        result = testdir.runpytest_inprocess(p, "--fixtures")
         assert result.ret == 0
         result.stdout.fnmatch_lines("""
             *fixtures defined*conftest*
@@ -783,7 +783,7 @@
             def test_two_different_setups(arg1, arg2):
                 assert arg1 != arg2
         """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
         result.stdout.fnmatch_lines([
             "*1 passed*"
         ])
@@ -798,7 +798,7 @@
             def test_two_funcarg(arg1):
                 assert arg1 == 11
         """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
         result.stdout.fnmatch_lines([
             "*1 passed*"
         ])
@@ -825,7 +825,7 @@
             def test_check_test0_has_teardown_correct():
                 assert test_0.l == [2]
         """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
         result.stdout.fnmatch_lines([
             "*3 passed*"
         ])
@@ -841,7 +841,7 @@
             def test_func(app):
                 pass
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*3/x*",
@@ -896,7 +896,7 @@
             def test_add(arg2):
                 assert arg2 == 2
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*involved factories*",
             "* def arg2*",
@@ -918,7 +918,7 @@
             def test_add(arg1, arg2):
                 assert arg2 == 2
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*involved factories*",
             "* def arg2*",
@@ -942,7 +942,7 @@
                 assert arg2 == arg1 + 1
                 assert len(l) == arg1
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines([
             "*2 passed*"
         ])
@@ -962,7 +962,7 @@
             def test_missing(call_fail):
                 pass
             """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines("""
             *pytest.fixture()*
             *def call_fail(fail)*
@@ -1044,7 +1044,7 @@
         reprec.assertoutcome(passed=2)
 
     def test_usefixtures_seen_in_showmarkers(self, testdir):
-        result = testdir.inline_runpytest("--markers")
+        result = testdir.runpytest_inprocess("--markers")
         result.stdout.fnmatch_lines("""
             *usefixtures(fixturename1*mark tests*fixtures*
         """)
@@ -1311,7 +1311,7 @@
         conftest.move(a.join(conftest.basename))
         a.join("test_something.py").write("def test_func(): pass")
         b.join("test_otherthing.py").write("def test_func(): pass")
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines("""
             *1 passed*1 error*
         """)
@@ -1765,7 +1765,7 @@
                 def test_1(arg):
                     pass
             """ % method)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*You tried*function*session*request*",
@@ -1823,7 +1823,7 @@
             def test_mismatch(arg):
                 pass
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*",
             "*1 error*",
@@ -1874,7 +1874,7 @@
             def test_func4(marg):
                 pass
         """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
         result.stdout.fnmatch_lines("""
             test_mod1.py::test_func[s1] PASSED
             test_mod2.py::test_func2[s1] PASSED
@@ -1926,7 +1926,7 @@
                 def test_3(self):
                     pass
         """)
-        result = testdir.inline_runpytest("-vs")
+        result = testdir.runpytest_inprocess("-vs")
         result.stdout.fnmatch_lines("""
             test_class_ordering.py::TestClass2::test_1[1-a] PASSED
             test_class_ordering.py::TestClass2::test_1[2-a] PASSED
@@ -2017,7 +2017,7 @@
             def test_finish():
                 assert not l
         """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
         result.stdout.fnmatch_lines("""
             *3 passed*
         """)
@@ -2047,7 +2047,7 @@
             def test_browser(browser):
                 assert browser['visited'] is True
         """))
-        reprec = testdir.inline_runpytest("-s")
+        reprec = testdir.runpytest_inprocess("-s")
         for test in ['test_browser']:
             reprec.stdout.fnmatch_lines('*Finalized*')
 
@@ -2258,7 +2258,7 @@
             def test_foo(fix):
                 assert 1
         """)
-        res = testdir.inline_runpytest('-v')
+        res = testdir.runpytest_inprocess('-v')
         res.stdout.fnmatch_lines([
             '*test_foo*alpha*',
             '*test_foo*beta*'])
@@ -2275,7 +2275,7 @@
             def test_foo(fix):
                 assert 1
         """)
-        res = testdir.inline_runpytest('-v')
+        res = testdir.runpytest_inprocess('-v')
         res.stdout.fnmatch_lines([
             '*test_foo*alpha*',
             '*test_foo*beta*'])
@@ -2335,7 +2335,7 @@
             def test_something(gen):
                 pass
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*def gen(qwe123):*",
@@ -2361,7 +2361,7 @@
             def test_3():
                 assert l[0] != l[1]
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines("""
             *ERROR*teardown*test_1*
             *KeyError*
@@ -2381,7 +2381,7 @@
             def test_something():
                 pass
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*def gen(qwe123):*",
@@ -2395,7 +2395,7 @@
         assert config.option.showfixtures
 
     def test_show_fixtures(self, testdir):
-        result = testdir.inline_runpytest("--fixtures")
+        result = testdir.runpytest_inprocess("--fixtures")
         result.stdout.fnmatch_lines([
                 "*tmpdir*",
                 "*temporary directory*",
@@ -2403,7 +2403,7 @@
         )
 
     def test_show_fixtures_verbose(self, testdir):
-        result = testdir.inline_runpytest("--fixtures", "-v")
+        result = testdir.runpytest_inprocess("--fixtures", "-v")
         result.stdout.fnmatch_lines([
                 "*tmpdir*--*tmpdir.py*",
                 "*temporary directory*",
@@ -2420,7 +2420,7 @@
             def arg1():
                 """  hello world """
         ''')
-        result = testdir.inline_runpytest("--fixtures", p)
+        result = testdir.runpytest_inprocess("--fixtures", p)
         result.stdout.fnmatch_lines("""
             *tmpdir
             *fixtures defined from*
@@ -2442,7 +2442,7 @@
                 def test_hello():
                     pass
             """)
-        result = testdir.inline_runpytest("--fixtures")
+        result = testdir.runpytest_inprocess("--fixtures")
         result.stdout.fnmatch_lines("""
             *tmpdir*
             *fixtures defined from*conftest*
@@ -2468,7 +2468,7 @@
 
                 """
         ''')
-        result = testdir.inline_runpytest("--fixtures", p)
+        result = testdir.runpytest_inprocess("--fixtures", p)
         result.stdout.fnmatch_lines("""
             * fixtures defined from test_show_fixtures_trimmed_doc *
             arg2
@@ -2496,7 +2496,7 @@
                 print ("test2 %s" % arg1)
                 assert 0
         """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
         result.stdout.fnmatch_lines("""
             *setup*
             *test1 1*
@@ -2519,7 +2519,7 @@
             def test_2(arg1):
                 print ("test2 %s" % arg1)
         """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
         result.stdout.fnmatch_lines("""
             *setup*
             *test1 1*
@@ -2537,7 +2537,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
         result.stdout.fnmatch_lines("""
             *pytest.fail*setup*
             *1 error*
@@ -2553,7 +2553,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
         result.stdout.fnmatch_lines("""
             *pytest.fail*teardown*
             *1 passed*1 error*
@@ -2569,7 +2569,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
         result.stdout.fnmatch_lines("""
             *fixture function*
             *test_yields*:2*
@@ -2585,7 +2585,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
         result.stdout.fnmatch_lines("""
             *yield_fixture*requires*yield*
             *yield_fixture*
@@ -2601,7 +2601,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
         result.stdout.fnmatch_lines("""
             *fixture*cannot use*yield*
             *def arg1*

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/python/metafunc.py
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -246,7 +246,7 @@
                 assert x in (10,20)
                 assert y == 2
         """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
         result.stdout.fnmatch_lines([
             "*test_simple*1-2*",
             "*test_simple*2-2*",
@@ -290,7 +290,7 @@
                 def test_meth(self, x, y):
                     assert 0, x
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.ret == 1
         result.assert_outcomes(failed=6)
 
@@ -330,7 +330,7 @@
                def test_3(self, arg, arg2):
                   pass
         """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
         assert result.ret == 0
         result.stdout.fnmatch_lines("""
             *test_1*1*
@@ -372,7 +372,7 @@
                     assert metafunc.function == unbound
                     assert metafunc.cls == TestClass
         """)
-        result = testdir.inline_runpytest(p, "-v")
+        result = testdir.runpytest_inprocess(p, "-v")
         result.assert_outcomes(passed=2)
 
     def test_addcall_with_two_funcargs_generators(self, testdir):
@@ -389,7 +389,7 @@
                 def test_myfunc(self, arg1, arg2):
                     assert arg1 == arg2
         """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
         result.stdout.fnmatch_lines([
             "*test_myfunc*0*PASS*",
             "*test_myfunc*1*FAIL*",
@@ -410,7 +410,7 @@
             def test_func2(arg1):
                 assert arg1 in (10, 20)
         """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func1*0*PASS*",
             "*test_func1*1*FAIL*",
@@ -427,7 +427,7 @@
                 def test_hello(xyz):
                     pass
         """)
-        result = testdir.inline_runpytest(p)
+        result = testdir.runpytest_inprocess(p)
         result.assert_outcomes(passed=1)
 
 
@@ -450,7 +450,7 @@
                 def test_myfunc(self, arg1, arg2):
                     assert arg1 == arg2
         """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
         result.stdout.fnmatch_lines([
             "*test_myfunc*hello*PASS*",
             "*test_myfunc*world*FAIL*",
@@ -466,7 +466,7 @@
                 def test_myfunc(self, hello):
                     assert hello == "world"
         """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
         result.stdout.fnmatch_lines([
             "*test_myfunc*hello*PASS*",
             "*1 passed*"
@@ -483,7 +483,7 @@
                     assert not hasattr(self, 'x')
                     self.x = 1
         """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func*0*PASS*",
             "*test_func*1*PASS*",
@@ -501,7 +501,7 @@
                 def setup_method(self, func):
                     self.val = 1
             """)
-        result = testdir.inline_runpytest(p)
+        result = testdir.runpytest_inprocess(p)
         result.assert_outcomes(passed=1)
 
     def test_parametrize_functional2(self, testdir):
@@ -512,7 +512,7 @@
             def test_hello(arg1, arg2):
                 assert 0, (arg1, arg2)
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines([
             "*(1, 4)*",
             "*(1, 5)*",
@@ -537,7 +537,7 @@
             def test_func1(arg1, arg2):
                 assert arg1 == 11
         """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func1*1*PASS*",
             "*1 passed*"
@@ -558,7 +558,7 @@
             def test_func(arg2):
                 assert arg2 == 10
         """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func*1*PASS*",
             "*1 passed*"
@@ -574,7 +574,7 @@
             def test_function(a, b):
                 assert a == b
         """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
         assert result.ret == 1
         result.stdout.fnmatch_lines_random([
             "*test_function*basic*PASSED",
@@ -591,7 +591,7 @@
             def test_function(a, b):
                 assert 1
         """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
         result.stdout.fnmatch_lines("""
             *test_function*1-b0*
             *test_function*1.3-b1*
@@ -647,7 +647,7 @@
             def test_function():
                 pass
         """)
-        reprec = testdir.inline_runpytest()
+        reprec = testdir.runpytest_inprocess()
         reprec.assert_outcomes(passed=1)
 
     def test_generate_tests_only_done_in_subdir(self, testdir):
@@ -679,7 +679,7 @@
             test_x = make_tests()
             test_y = make_tests()
         """)
-        reprec = testdir.inline_runpytest()
+        reprec = testdir.runpytest_inprocess()
         reprec.assert_outcomes(passed=4)
 
     @pytest.mark.issue463

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/test_assertrewrite.py
--- a/testing/test_assertrewrite.py
+++ b/testing/test_assertrewrite.py
@@ -468,12 +468,12 @@
         tmp = "--basetemp=%s" % p
         monkeypatch.setenv("PYTHONOPTIMIZE", "2")
         monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
-        assert testdir.runpybin("py.test", tmp).ret == 0
+        assert testdir.runpytest_subprocess(tmp).ret == 0
         tagged = "test_pyc_vs_pyo." + PYTEST_TAG
         assert tagged + ".pyo" in os.listdir("__pycache__")
         monkeypatch.undo()
         monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
-        assert testdir.runpybin("py.test", tmp).ret == 1
+        assert testdir.runpytest_subprocess(tmp).ret == 1
         assert tagged + ".pyc" in os.listdir("__pycache__")
 
     def test_package(self, testdir):

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/test_capture.py
--- a/testing/test_capture.py
+++ b/testing/test_capture.py
@@ -121,7 +121,7 @@
             print (sys.stdout)
             print (%s)
     """ % obj)
-    result = testdir.runpytest_subprocess("--capture=%s" % method)
+    result = testdir.runpytest("--capture=%s" % method)
     result.stdout.fnmatch_lines([
         "*1 passed*"
     ])
@@ -133,7 +133,7 @@
         def test_unicode():
             print ('b\\u00f6y')
     """)
-    result = testdir.runpytest_subprocess("--capture=%s" % method)
+    result = testdir.runpytest("--capture=%s" % method)
     result.stdout.fnmatch_lines([
         "*1 passed*"
     ])
@@ -144,7 +144,7 @@
         print ("collect %s failure" % 13)
         import xyz42123
     """)
-    result = testdir.runpytest_subprocess(p)
+    result = testdir.runpytest(p)
     result.stdout.fnmatch_lines([
         "*Captured stdout*",
         "*collect 13 failure*",
@@ -165,7 +165,7 @@
                 print ("in func2")
                 assert 0
         """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
             "setup module*",
             "setup test_func1*",
@@ -188,7 +188,7 @@
             def teardown_function(func):
                 print ("in teardown")
         """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
             "*test_func():*",
             "*Captured stdout during setup*",
@@ -206,7 +206,7 @@
                 print ("in func2")
                 assert 0
         """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
         s = result.stdout.str()
         assert "in func1" not in s
         assert "in func2" in s
@@ -222,7 +222,7 @@
                 print ("in func1")
                 pass
         """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
             '*teardown_function*',
             '*Captured stdout*',
@@ -240,7 +240,7 @@
             def test_func():
                 pass
         """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
             "*def teardown_module(mod):*",
             "*Captured stdout*",
@@ -259,7 +259,7 @@
                 sys.stderr.write(str(2))
                 raise ValueError
         """)
-        result = testdir.runpytest_subprocess(p1)
+        result = testdir.runpytest(p1)
         result.stdout.fnmatch_lines([
             "*test_capturing_outerr.py .F",
             "====* FAILURES *====",
@@ -410,7 +410,7 @@
             def test_two(capfd, capsys):
                 pass
         """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
             "*ERROR*setup*test_one*",
             "*capsys*capfd*same*time*",
@@ -425,7 +425,7 @@
                 print ("xxx42xxx")
                 assert 0
         """ % method)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
             "xxx42xxx",
         ])
@@ -447,7 +447,7 @@
             def test_hello(capsys, missingarg):
                 pass
         """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
             "*test_partial_setup_failure*",
             "*1 error*",
@@ -485,7 +485,7 @@
             raise ValueError(42)
     """))
     sub1.join("test_mod.py").write("def test_func1(): pass")
-    result = testdir.runpytest_subprocess(testdir.tmpdir, '--traceconfig')
+    result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
     result.stdout.fnmatch_lines([
         "*ValueError(42)*",
         "*1 error*"
@@ -512,7 +512,7 @@
             print ("hello19")
     """)
     testdir.makepyfile("def test_func(): pass")
-    result = testdir.runpytest_subprocess()
+    result = testdir.runpytest()
     assert result.ret == 0
     assert 'hello19' not in result.stdout.str()
 
@@ -526,7 +526,7 @@
             os.write(1, omg)
             assert 0
         """)
-    result = testdir.runpytest_subprocess('--cap=fd')
+    result = testdir.runpytest('--cap=fd')
     result.stdout.fnmatch_lines('''
         *def test_func*
         *assert 0*
@@ -541,7 +541,7 @@
             print ("hello19")
     """)
     testdir.makepyfile("def test_func(): pass")
-    result = testdir.runpytest_subprocess("-vs")
+    result = testdir.runpytest("-vs")
     assert result.ret == 0
     assert 'hello19' in result.stdout.str()
 
@@ -562,10 +562,8 @@
         if __name__ == '__main__':
             test_foo()
         """)
-    result = testdir.runpytest_subprocess('--assert=plain')
-    result.stdout.fnmatch_lines([
-        '*2 passed*',
-    ])
+    result = testdir.runpytest('--assert=plain')
+    result.assert_outcomes(passed=2)
 
 
 class TestTextIO:

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/test_collection.py
--- a/testing/test_collection.py
+++ b/testing/test_collection.py
@@ -312,7 +312,7 @@
     def test_collect_topdir(self, testdir):
         p = testdir.makepyfile("def test_func(): pass")
         id = "::".join([p.basename, "test_func"])
-        # XXX migrate to inline_genitems? (see below)
+        # XXX migrate to collectonly? (see below)
         config = testdir.parseconfig(id)
         topdir = testdir.tmpdir
         rcol = Session(config)

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/test_config.py
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -39,7 +39,7 @@
             [pytest]
             minversion=9.0
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.ret != 0
         result.stderr.fnmatch_lines([
             "*tox.ini:2*requires*9.0*actual*"
@@ -320,7 +320,7 @@
         def pytest_cmdline_preparse(args):
             args.append("-h")
     """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
     result.stdout.fnmatch_lines([
         "*pytest*",
         "*-h*",
@@ -389,11 +389,11 @@
             def test_hello(fix):
                 pass
         """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
         assert result.parseoutcomes()["warnings"] > 0
         assert "hello" not in result.stdout.str()
 
-        result = testdir.inline_runpytest("-rw")
+        result = testdir.runpytest_inprocess("-rw")
         result.stdout.fnmatch_lines("""
             ===*warning summary*===
             *WT1*test_warn_on_test_item*:5*hello*

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/test_doctest.py
--- a/testing/test_doctest.py
+++ b/testing/test_doctest.py
@@ -1,5 +1,5 @@
 from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
-import py, pytest
+import py
 
 class TestDoctests:
 

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/test_nose.py
--- a/testing/test_nose.py
+++ b/testing/test_nose.py
@@ -18,7 +18,7 @@
         test_hello.setup = lambda: l.append(1)
         test_hello.teardown = lambda: l.append(2)
     """)
-    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result = testdir.runpytest_inprocess(p, '-p', 'nose')
     result.assert_outcomes(passed=2)
 
 
@@ -63,7 +63,7 @@
             assert l == [1,2]
 
     """)
-    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result = testdir.runpytest_inprocess(p, '-p', 'nose')
     result.assert_outcomes(passed=2)
 
 
@@ -85,7 +85,7 @@
             assert l == [1,2]
 
     """)
-    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result = testdir.runpytest_inprocess(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*TypeError: <lambda>()*"
     ])
@@ -136,7 +136,7 @@
         test_hello.setup = my_setup_partial
         test_hello.teardown = my_teardown_partial
     """)
-    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result = testdir.runpytest_inprocess(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*"
     ])
@@ -203,7 +203,7 @@
                 #expect.append('setup')
                 eq_(self.called, expect)
     """)
-    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result = testdir.runpytest_inprocess(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*10 passed*"
     ])
@@ -234,7 +234,7 @@
             assert items[2] == 2
             assert 1 not in items
     """)
-    result = testdir.inline_runpytest('-p', 'nose')
+    result = testdir.runpytest_inprocess('-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*",
     ])
@@ -256,7 +256,7 @@
         def test_world():
             assert l == [1]
         """)
-    result = testdir.inline_runpytest('-p', 'nose')
+    result = testdir.runpytest_inprocess('-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*",
     ])
@@ -272,7 +272,7 @@
             def test_first(self):
                 pass
         """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
     result.stdout.fnmatch_lines([
         "*1 passed*",
     ])
@@ -297,7 +297,7 @@
             def test_fun(self):
                 pass
         """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
     result.assert_outcomes(passed=1)
 
 @pytest.mark.skipif("sys.version_info < (2,6)")
@@ -323,7 +323,7 @@
                 """Undoes the setup."""
                 raise Exception("should not call teardown for skipped tests")
         ''')
-    reprec = testdir.inline_runpytest()
+    reprec = testdir.runpytest_inprocess()
     reprec.assert_outcomes(passed=1, skipped=1)
 
 
@@ -334,7 +334,7 @@
         def test_failing():
             assert False
         """)
-    result = testdir.inline_runpytest(p)
+    result = testdir.runpytest_inprocess(p)
     result.assert_outcomes(skipped=1)
 
 

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/test_pdb.py
--- a/testing/test_pdb.py
+++ b/testing/test_pdb.py
@@ -2,6 +2,13 @@
 import py
 import sys
 
+def runpdb_and_get_report(testdir, source):
+    p = testdir.makepyfile(source)
+    result = testdir.runpytest_inprocess("--pdb", p)
+    reports = result.reprec.getreports("pytest_runtest_logreport")
+    assert len(reports) == 3, reports # setup/call/teardown
+    return reports[1]
+
 
 class TestPDB:
     def pytest_funcarg__pdblist(self, request):
@@ -14,7 +21,7 @@
         return pdblist
 
     def test_pdb_on_fail(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             def test_func():
                 assert 0
         """)
@@ -24,7 +31,7 @@
         assert tb[-1].name == "test_func"
 
     def test_pdb_on_xfail(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             import pytest
             @pytest.mark.xfail
             def test_func():
@@ -34,7 +41,7 @@
         assert not pdblist
 
     def test_pdb_on_skip(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             import pytest
             def test_func():
                 pytest.skip("hello")
@@ -43,7 +50,7 @@
         assert len(pdblist) == 0
 
     def test_pdb_on_BdbQuit(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             import bdb
             def test_func():
                 raise bdb.BdbQuit

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee testing/test_pytester.py
--- a/testing/test_pytester.py
+++ b/testing/test_pytester.py
@@ -69,9 +69,7 @@
             assert 1
     """)
     result = testdir.runpytest()
-    result.stdout.fnmatch_lines([
-        "*1 passed*"
-    ])
+    result.assert_outcomes(passed=1)
 
 
 def make_holder():
@@ -114,16 +112,6 @@
         unichr = chr
     testdir.makepyfile(unichr(0xfffd))
 
-def test_inprocess_plugins(testdir):
-    class Plugin(object):
-        configured = False
-        def pytest_configure(self, config):
-            self.configured = True
-    plugin = Plugin()
-    testdir.inprocess_run([], [plugin])
-
-    assert plugin.configured
-
 def test_inline_run_clean_modules(testdir):
     test_mod = testdir.makepyfile("def test_foo(): assert True")
     result = testdir.inline_run(str(test_mod))

diff -r dc1c8c7ea818036073356c01ed74608f0671fc73 -r 390fe4614bd4dc3cbf6506a0304a2cd11c6c16ee tox.ini
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
 distshare={homedir}/.tox/distshare
-envlist=flakes,py26,py27,py34,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py33,py27-xdist,py33-xdist,py27-trial,py33-trial,doctesting,py27-cxfreeze
+envlist=flakes,py26,py27,py34,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py33,py27-xdist,py33-xdist,{py27,py33}-trial,py27-subprocess,doctesting,py27-cxfreeze
 
 [testenv]
 changedir=testing
@@ -9,6 +9,15 @@
     nose
     mock
 
+[testenv:py27-subprocess]
+changedir=.
+basepython=python2.7
+deps=pytest-xdist
+    mock
+    nose
+commands=
+  py.test -n3 -rfsxX --runpytest=subprocess {posargs:testing}
+
 [testenv:genscript]
 changedir=.
 commands= py.test --genscript=pytest1
@@ -136,7 +145,7 @@
 minversion=2.0
 plugins=pytester
 #--pyargs --doctest-modules --ignore=.tox
-addopts= -rxsX 
+addopts= -rxsX -p pytester
 rsyncdirs=tox.ini pytest.py _pytest testing
 python_files=test_*.py *_test.py testing/*/*.py
 python_classes=Test Acceptance


https://bitbucket.org/pytest-dev/pytest/commits/8e7a43d5ae0d/
Changeset:   8e7a43d5ae0d
Branch:      testrefactor
User:        hpk42
Date:        2015-04-28 09:56:57+00:00
Summary:     merge
Affected #:  0 files



https://bitbucket.org/pytest-dev/pytest/commits/bcbb77bff338/
Changeset:   bcbb77bff338
Branch:      testrefactor
User:        hpk42
Date:        2015-04-28 10:05:08+00:00
Summary:     use runpytest() instead of runpytest_inprocess if a test can run as subprocess as well
Affected #:  5 files

diff -r 8e7a43d5ae0d5bcbc23466091db846187291e4d5 -r bcbb77bff338c564d2b28500d0444932a04ad35a testing/python/collect.py
--- a/testing/python/collect.py
+++ b/testing/python/collect.py
@@ -15,7 +15,7 @@
         p.pyimport()
         del py.std.sys.modules['test_whatever']
         b.ensure("test_whatever.py")
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines([
             "*import*mismatch*",
             "*imported*test_whatever*",
@@ -59,7 +59,7 @@
                 def __init__(self):
                     pass
         """)
-        result = testdir.runpytest_inprocess("-rw")
+        result = testdir.runpytest("-rw")
         result.stdout.fnmatch_lines_random("""
             WC1*test_class_with_init_warning.py*__init__*
         """)
@@ -69,7 +69,7 @@
             class test(object):
                 pass
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines([
             "*collected 0*",
         ])
@@ -86,7 +86,7 @@
                 def teardown_class(cls):
                     pass
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines([
             "*1 passed*",
         ])
@@ -534,7 +534,7 @@
         """)
         testdir.makepyfile("def test_some(): pass")
         testdir.makepyfile(test_xyz="def test_func(): pass")
-        result = testdir.runpytest_inprocess("--collect-only")
+        result = testdir.runpytest("--collect-only")
         result.stdout.fnmatch_lines([
             "*<Module*test_pytest*",
             "*<MyModule*xyz*",
@@ -590,7 +590,7 @@
                     return MyFunction(name, collector)
         """)
         testdir.makepyfile("def some(): pass")
-        result = testdir.runpytest_inprocess("--collect-only")
+        result = testdir.runpytest("--collect-only")
         result.stdout.fnmatch_lines([
             "*MyFunction*some*",
         ])
@@ -648,7 +648,7 @@
                 raise ValueError("xyz")
         """)
         p = testdir.makepyfile("def test(hello): pass")
-        result = testdir.runpytest_inprocess(p)
+        result = testdir.runpytest(p)
         assert result.ret != 0
         out = result.stdout.str()
         assert out.find("xyz") != -1
@@ -656,7 +656,7 @@
         numentries = out.count("_ _ _") # separator for traceback entries
         assert numentries == 0
 
-        result = testdir.runpytest_inprocess("--fulltrace", p)
+        result = testdir.runpytest("--fulltrace", p)
         out = result.stdout.str()
         assert out.find("conftest.py:2: ValueError") != -1
         numentries = out.count("_ _ _ _") # separator for traceback entries
@@ -669,7 +669,7 @@
             x = 17
             asd
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.ret != 0
         out = result.stdout.str()
         assert "x = 1" not in out
@@ -678,7 +678,7 @@
             " *asd*",
             "E*NameError*",
         ])
-        result = testdir.runpytest_inprocess("--fulltrace")
+        result = testdir.runpytest("--fulltrace")
         out = result.stdout.str()
         assert "x = 1" in out
         assert "x = 2" in out
@@ -769,7 +769,7 @@
     """)
     p2 = p.new(basename=p.basename.replace("test", "check"))
     p.move(p2)
-    result = testdir.runpytest_inprocess("--collect-only", "-s")
+    result = testdir.runpytest("--collect-only", "-s")
     result.stdout.fnmatch_lines([
         "*check_customized*",
         "*check_simple*",
@@ -777,7 +777,7 @@
         "*check_meth*",
     ])
 
-    result = testdir.runpytest_inprocess()
+    result = testdir.runpytest()
     assert result.ret == 0
     result.stdout.fnmatch_lines([
         "*2 passed*",
@@ -793,12 +793,12 @@
         def _test_underscore():
             pass
     """)
-    result = testdir.runpytest_inprocess("--collect-only", "-s")
+    result = testdir.runpytest("--collect-only", "-s")
     result.stdout.fnmatch_lines([
         "*_test_underscore*",
     ])
 
-    result = testdir.runpytest_inprocess()
+    result = testdir.runpytest()
     assert result.ret == 0
     result.stdout.fnmatch_lines([
         "*1 passed*",
@@ -818,7 +818,7 @@
          def test_hello():
             pass
     """)
-    result = testdir.runpytest_inprocess()
+    result = testdir.runpytest()
     result.stdout.fnmatch_lines([
         "*1 passed*",
     ])
@@ -842,7 +842,7 @@
             def test_hello(self):
                 pass
     """)
-    result = testdir.runpytest_inprocess("--collect-only")
+    result = testdir.runpytest("--collect-only")
     result.stdout.fnmatch_lines([
         "*MyClass*",
         "*MyInstance*",
@@ -862,6 +862,6 @@
             return Test
         TestFoo = make_test()
     """)
-    result = testdir.runpytest_inprocess()
+    result = testdir.runpytest()
     assert "TypeError" not in result.stdout.str()
     assert result.ret == 0

diff -r 8e7a43d5ae0d5bcbc23466091db846187291e4d5 -r bcbb77bff338c564d2b28500d0444932a04ad35a testing/python/fixture.py
--- a/testing/python/fixture.py
+++ b/testing/python/fixture.py
@@ -33,7 +33,7 @@
             def test_func(some):
                 pass
         """)
-        result = testdir.runpytest_inprocess() # "--collect-only")
+        result = testdir.runpytest() # "--collect-only")
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*def test_func(some)*",
@@ -78,7 +78,7 @@
                 def test_method(self, something):
                     assert something is self
         """)
-        result = testdir.runpytest_inprocess(p)
+        result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
             "*1 passed*"
         ])
@@ -119,9 +119,9 @@
                  def test_spam(self, spam):
                      assert spam == 'spamspam'
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.runpytest_inprocess(testfile)
+        result = testdir.runpytest(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_extend_fixture_conftest_module(self, testdir):
@@ -142,9 +142,9 @@
             def test_spam(spam):
                 assert spam == 'spamspam'
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.runpytest_inprocess(testfile)
+        result = testdir.runpytest(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_extend_fixture_conftest_conftest(self, testdir):
@@ -168,9 +168,9 @@
             def test_spam(spam):
                 assert spam == "spamspam"
         """))
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.runpytest_inprocess(testfile)
+        result = testdir.runpytest(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_extend_fixture_conftest_plugin(self, testdir):
@@ -195,7 +195,7 @@
             def test_foo(foo):
                 assert foo == 14
         """)
-        result = testdir.runpytest_inprocess('-s')
+        result = testdir.runpytest('-s')
         assert result.ret == 0
 
     def test_extend_fixture_plugin_plugin(self, testdir):
@@ -221,7 +221,7 @@
             def test_foo(foo):
                 assert foo == 14
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.ret == 0
 
     def test_override_parametrized_fixture_conftest_module(self, testdir):
@@ -243,9 +243,9 @@
             def test_spam(spam):
                 assert spam == 'spam'
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.runpytest_inprocess(testfile)
+        result = testdir.runpytest(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_override_parametrized_fixture_conftest_conftest(self, testdir):
@@ -270,9 +270,9 @@
             def test_spam(spam):
                 assert spam == "spam"
         """))
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.runpytest_inprocess(testfile)
+        result = testdir.runpytest(testfile)
         result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_override_non_parametrized_fixture_conftest_module(self, testdir):
@@ -297,9 +297,9 @@
                 assert spam == params['spam']
                 params['spam'] += 1
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines(["*3 passed*"])
-        result = testdir.runpytest_inprocess(testfile)
+        result = testdir.runpytest(testfile)
         result.stdout.fnmatch_lines(["*3 passed*"])
 
     def test_override_non_parametrized_fixture_conftest_conftest(self, testdir):
@@ -327,9 +327,9 @@
                 assert spam == params['spam']
                 params['spam'] += 1
         """))
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines(["*3 passed*"])
-        result = testdir.runpytest_inprocess(testfile)
+        result = testdir.runpytest(testfile)
         result.stdout.fnmatch_lines(["*3 passed*"])
 
     def test_autouse_fixture_plugin(self, testdir):
@@ -349,7 +349,7 @@
             def test_foo(request):
                 assert request.function.foo == 7
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.ret == 0
 
     def test_funcarg_lookup_error(self, testdir):
@@ -357,7 +357,7 @@
             def test_lookup_error(unknown):
                 pass
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines([
             "*ERROR*test_lookup_error*",
             "*def test_lookup_error(unknown):*",
@@ -386,7 +386,7 @@
                     traceback.print_exc()
                 assert sys.exc_info() == (None, None, None)
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.ret == 0
 
 
@@ -529,7 +529,7 @@
             def test_second():
                 assert len(l) == 1
         """)
-        result = testdir.runpytest_inprocess(p)
+        result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
             "*1 error*"  # XXX the whole module collection fails
             ])
@@ -614,7 +614,7 @@
         """))
         p = b.join("test_module.py")
         p.write("def test_func(arg1): pass")
-        result = testdir.runpytest_inprocess(p, "--fixtures")
+        result = testdir.runpytest(p, "--fixtures")
         assert result.ret == 0
         result.stdout.fnmatch_lines("""
             *fixtures defined*conftest*
@@ -783,7 +783,7 @@
             def test_two_different_setups(arg1, arg2):
                 assert arg1 != arg2
         """)
-        result = testdir.runpytest_inprocess("-v")
+        result = testdir.runpytest("-v")
         result.stdout.fnmatch_lines([
             "*1 passed*"
         ])
@@ -798,7 +798,7 @@
             def test_two_funcarg(arg1):
                 assert arg1 == 11
         """)
-        result = testdir.runpytest_inprocess("-v")
+        result = testdir.runpytest("-v")
         result.stdout.fnmatch_lines([
             "*1 passed*"
         ])
@@ -825,7 +825,7 @@
             def test_check_test0_has_teardown_correct():
                 assert test_0.l == [2]
         """)
-        result = testdir.runpytest_inprocess("-v")
+        result = testdir.runpytest("-v")
         result.stdout.fnmatch_lines([
             "*3 passed*"
         ])
@@ -841,7 +841,7 @@
             def test_func(app):
                 pass
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*3/x*",
@@ -896,7 +896,7 @@
             def test_add(arg2):
                 assert arg2 == 2
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*involved factories*",
             "* def arg2*",
@@ -918,7 +918,7 @@
             def test_add(arg1, arg2):
                 assert arg2 == 2
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*involved factories*",
             "* def arg2*",
@@ -942,7 +942,7 @@
                 assert arg2 == arg1 + 1
                 assert len(l) == arg1
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines([
             "*2 passed*"
         ])
@@ -962,7 +962,7 @@
             def test_missing(call_fail):
                 pass
             """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines("""
             *pytest.fixture()*
             *def call_fail(fail)*
@@ -1044,7 +1044,7 @@
         reprec.assertoutcome(passed=2)
 
     def test_usefixtures_seen_in_showmarkers(self, testdir):
-        result = testdir.runpytest_inprocess("--markers")
+        result = testdir.runpytest("--markers")
         result.stdout.fnmatch_lines("""
             *usefixtures(fixturename1*mark tests*fixtures*
         """)
@@ -1311,7 +1311,7 @@
         conftest.move(a.join(conftest.basename))
         a.join("test_something.py").write("def test_func(): pass")
         b.join("test_otherthing.py").write("def test_func(): pass")
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines("""
             *1 passed*1 error*
         """)
@@ -1765,7 +1765,7 @@
                 def test_1(arg):
                     pass
             """ % method)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*You tried*function*session*request*",
@@ -1823,7 +1823,7 @@
             def test_mismatch(arg):
                 pass
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines([
             "*ScopeMismatch*",
             "*1 error*",
@@ -1874,7 +1874,7 @@
             def test_func4(marg):
                 pass
         """)
-        result = testdir.runpytest_inprocess("-v")
+        result = testdir.runpytest("-v")
         result.stdout.fnmatch_lines("""
             test_mod1.py::test_func[s1] PASSED
             test_mod2.py::test_func2[s1] PASSED
@@ -1926,7 +1926,7 @@
                 def test_3(self):
                     pass
         """)
-        result = testdir.runpytest_inprocess("-vs")
+        result = testdir.runpytest("-vs")
         result.stdout.fnmatch_lines("""
             test_class_ordering.py::TestClass2::test_1[1-a] PASSED
             test_class_ordering.py::TestClass2::test_1[2-a] PASSED
@@ -2017,7 +2017,7 @@
             def test_finish():
                 assert not l
         """)
-        result = testdir.runpytest_inprocess("-v")
+        result = testdir.runpytest("-v")
         result.stdout.fnmatch_lines("""
             *3 passed*
         """)
@@ -2047,7 +2047,7 @@
             def test_browser(browser):
                 assert browser['visited'] is True
         """))
-        reprec = testdir.runpytest_inprocess("-s")
+        reprec = testdir.runpytest("-s")
         for test in ['test_browser']:
             reprec.stdout.fnmatch_lines('*Finalized*')
 
@@ -2258,7 +2258,7 @@
             def test_foo(fix):
                 assert 1
         """)
-        res = testdir.runpytest_inprocess('-v')
+        res = testdir.runpytest('-v')
         res.stdout.fnmatch_lines([
             '*test_foo*alpha*',
             '*test_foo*beta*'])
@@ -2275,7 +2275,7 @@
             def test_foo(fix):
                 assert 1
         """)
-        res = testdir.runpytest_inprocess('-v')
+        res = testdir.runpytest('-v')
         res.stdout.fnmatch_lines([
             '*test_foo*alpha*',
             '*test_foo*beta*'])
@@ -2335,7 +2335,7 @@
             def test_something(gen):
                 pass
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*def gen(qwe123):*",
@@ -2361,7 +2361,7 @@
             def test_3():
                 assert l[0] != l[1]
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines("""
             *ERROR*teardown*test_1*
             *KeyError*
@@ -2381,7 +2381,7 @@
             def test_something():
                 pass
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "*def gen(qwe123):*",
@@ -2395,7 +2395,7 @@
         assert config.option.showfixtures
 
     def test_show_fixtures(self, testdir):
-        result = testdir.runpytest_inprocess("--fixtures")
+        result = testdir.runpytest("--fixtures")
         result.stdout.fnmatch_lines([
                 "*tmpdir*",
                 "*temporary directory*",
@@ -2403,7 +2403,7 @@
         )
 
     def test_show_fixtures_verbose(self, testdir):
-        result = testdir.runpytest_inprocess("--fixtures", "-v")
+        result = testdir.runpytest("--fixtures", "-v")
         result.stdout.fnmatch_lines([
                 "*tmpdir*--*tmpdir.py*",
                 "*temporary directory*",
@@ -2420,7 +2420,7 @@
             def arg1():
                 """  hello world """
         ''')
-        result = testdir.runpytest_inprocess("--fixtures", p)
+        result = testdir.runpytest("--fixtures", p)
         result.stdout.fnmatch_lines("""
             *tmpdir
             *fixtures defined from*
@@ -2442,7 +2442,7 @@
                 def test_hello():
                     pass
             """)
-        result = testdir.runpytest_inprocess("--fixtures")
+        result = testdir.runpytest("--fixtures")
         result.stdout.fnmatch_lines("""
             *tmpdir*
             *fixtures defined from*conftest*
@@ -2468,7 +2468,7 @@
 
                 """
         ''')
-        result = testdir.runpytest_inprocess("--fixtures", p)
+        result = testdir.runpytest("--fixtures", p)
         result.stdout.fnmatch_lines("""
             * fixtures defined from test_show_fixtures_trimmed_doc *
             arg2
@@ -2496,7 +2496,7 @@
                 print ("test2 %s" % arg1)
                 assert 0
         """)
-        result = testdir.runpytest_inprocess("-s")
+        result = testdir.runpytest("-s")
         result.stdout.fnmatch_lines("""
             *setup*
             *test1 1*
@@ -2519,7 +2519,7 @@
             def test_2(arg1):
                 print ("test2 %s" % arg1)
         """)
-        result = testdir.runpytest_inprocess("-s")
+        result = testdir.runpytest("-s")
         result.stdout.fnmatch_lines("""
             *setup*
             *test1 1*
@@ -2537,7 +2537,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.runpytest_inprocess("-s")
+        result = testdir.runpytest("-s")
         result.stdout.fnmatch_lines("""
             *pytest.fail*setup*
             *1 error*
@@ -2553,7 +2553,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.runpytest_inprocess("-s")
+        result = testdir.runpytest("-s")
         result.stdout.fnmatch_lines("""
             *pytest.fail*teardown*
             *1 passed*1 error*
@@ -2569,7 +2569,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.runpytest_inprocess("-s")
+        result = testdir.runpytest("-s")
         result.stdout.fnmatch_lines("""
             *fixture function*
             *test_yields*:2*
@@ -2585,7 +2585,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.runpytest_inprocess("-s")
+        result = testdir.runpytest("-s")
         result.stdout.fnmatch_lines("""
             *yield_fixture*requires*yield*
             *yield_fixture*
@@ -2601,7 +2601,7 @@
             def test_1(arg1):
                 pass
         """)
-        result = testdir.runpytest_inprocess("-s")
+        result = testdir.runpytest("-s")
         result.stdout.fnmatch_lines("""
             *fixture*cannot use*yield*
             *def arg1*

diff -r 8e7a43d5ae0d5bcbc23466091db846187291e4d5 -r bcbb77bff338c564d2b28500d0444932a04ad35a testing/python/metafunc.py
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -246,7 +246,7 @@
                 assert x in (10,20)
                 assert y == 2
         """)
-        result = testdir.runpytest_inprocess("-v")
+        result = testdir.runpytest("-v")
         result.stdout.fnmatch_lines([
             "*test_simple*1-2*",
             "*test_simple*2-2*",
@@ -290,7 +290,7 @@
                 def test_meth(self, x, y):
                     assert 0, x
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.ret == 1
         result.assert_outcomes(failed=6)
 
@@ -330,7 +330,7 @@
                def test_3(self, arg, arg2):
                   pass
         """)
-        result = testdir.runpytest_inprocess("-v")
+        result = testdir.runpytest("-v")
         assert result.ret == 0
         result.stdout.fnmatch_lines("""
             *test_1*1*
@@ -372,7 +372,7 @@
                     assert metafunc.function == unbound
                     assert metafunc.cls == TestClass
         """)
-        result = testdir.runpytest_inprocess(p, "-v")
+        result = testdir.runpytest(p, "-v")
         result.assert_outcomes(passed=2)
 
     def test_addcall_with_two_funcargs_generators(self, testdir):
@@ -389,7 +389,7 @@
                 def test_myfunc(self, arg1, arg2):
                     assert arg1 == arg2
         """)
-        result = testdir.runpytest_inprocess("-v", p)
+        result = testdir.runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_myfunc*0*PASS*",
             "*test_myfunc*1*FAIL*",
@@ -410,7 +410,7 @@
             def test_func2(arg1):
                 assert arg1 in (10, 20)
         """)
-        result = testdir.runpytest_inprocess("-v", p)
+        result = testdir.runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func1*0*PASS*",
             "*test_func1*1*FAIL*",
@@ -427,7 +427,7 @@
                 def test_hello(xyz):
                     pass
         """)
-        result = testdir.runpytest_inprocess(p)
+        result = testdir.runpytest(p)
         result.assert_outcomes(passed=1)
 
 
@@ -450,7 +450,7 @@
                 def test_myfunc(self, arg1, arg2):
                     assert arg1 == arg2
         """)
-        result = testdir.runpytest_inprocess("-v", p)
+        result = testdir.runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_myfunc*hello*PASS*",
             "*test_myfunc*world*FAIL*",
@@ -466,7 +466,7 @@
                 def test_myfunc(self, hello):
                     assert hello == "world"
         """)
-        result = testdir.runpytest_inprocess("-v", p)
+        result = testdir.runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_myfunc*hello*PASS*",
             "*1 passed*"
@@ -483,7 +483,7 @@
                     assert not hasattr(self, 'x')
                     self.x = 1
         """)
-        result = testdir.runpytest_inprocess("-v", p)
+        result = testdir.runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func*0*PASS*",
             "*test_func*1*PASS*",
@@ -501,7 +501,7 @@
                 def setup_method(self, func):
                     self.val = 1
             """)
-        result = testdir.runpytest_inprocess(p)
+        result = testdir.runpytest(p)
         result.assert_outcomes(passed=1)
 
     def test_parametrize_functional2(self, testdir):
@@ -512,7 +512,7 @@
             def test_hello(arg1, arg2):
                 assert 0, (arg1, arg2)
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         result.stdout.fnmatch_lines([
             "*(1, 4)*",
             "*(1, 5)*",
@@ -537,7 +537,7 @@
             def test_func1(arg1, arg2):
                 assert arg1 == 11
         """)
-        result = testdir.runpytest_inprocess("-v", p)
+        result = testdir.runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func1*1*PASS*",
             "*1 passed*"
@@ -558,7 +558,7 @@
             def test_func(arg2):
                 assert arg2 == 10
         """)
-        result = testdir.runpytest_inprocess("-v", p)
+        result = testdir.runpytest("-v", p)
         result.stdout.fnmatch_lines([
             "*test_func*1*PASS*",
             "*1 passed*"
@@ -574,7 +574,7 @@
             def test_function(a, b):
                 assert a == b
         """)
-        result = testdir.runpytest_inprocess("-v")
+        result = testdir.runpytest("-v")
         assert result.ret == 1
         result.stdout.fnmatch_lines_random([
             "*test_function*basic*PASSED",
@@ -591,7 +591,7 @@
             def test_function(a, b):
                 assert 1
         """)
-        result = testdir.runpytest_inprocess("-v")
+        result = testdir.runpytest("-v")
         result.stdout.fnmatch_lines("""
             *test_function*1-b0*
             *test_function*1.3-b1*
@@ -647,7 +647,7 @@
             def test_function():
                 pass
         """)
-        reprec = testdir.runpytest_inprocess()
+        reprec = testdir.runpytest()
         reprec.assert_outcomes(passed=1)
 
     def test_generate_tests_only_done_in_subdir(self, testdir):
@@ -679,7 +679,7 @@
             test_x = make_tests()
             test_y = make_tests()
         """)
-        reprec = testdir.runpytest_inprocess()
+        reprec = testdir.runpytest()
         reprec.assert_outcomes(passed=4)
 
     @pytest.mark.issue463

diff -r 8e7a43d5ae0d5bcbc23466091db846187291e4d5 -r bcbb77bff338c564d2b28500d0444932a04ad35a testing/test_config.py
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -39,7 +39,7 @@
             [pytest]
             minversion=9.0
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.ret != 0
         result.stderr.fnmatch_lines([
             "*tox.ini:2*requires*9.0*actual*"
@@ -320,7 +320,7 @@
         def pytest_cmdline_preparse(args):
             args.append("-h")
     """)
-    result = testdir.runpytest_inprocess()
+    result = testdir.runpytest()
     result.stdout.fnmatch_lines([
         "*pytest*",
         "*-h*",
@@ -389,11 +389,11 @@
             def test_hello(fix):
                 pass
         """)
-        result = testdir.runpytest_inprocess()
+        result = testdir.runpytest()
         assert result.parseoutcomes()["warnings"] > 0
         assert "hello" not in result.stdout.str()
 
-        result = testdir.runpytest_inprocess("-rw")
+        result = testdir.runpytest("-rw")
         result.stdout.fnmatch_lines("""
             ===*warning summary*===
             *WT1*test_warn_on_test_item*:5*hello*

diff -r 8e7a43d5ae0d5bcbc23466091db846187291e4d5 -r bcbb77bff338c564d2b28500d0444932a04ad35a testing/test_nose.py
--- a/testing/test_nose.py
+++ b/testing/test_nose.py
@@ -18,7 +18,7 @@
         test_hello.setup = lambda: l.append(1)
         test_hello.teardown = lambda: l.append(2)
     """)
-    result = testdir.runpytest_inprocess(p, '-p', 'nose')
+    result = testdir.runpytest(p, '-p', 'nose')
     result.assert_outcomes(passed=2)
 
 
@@ -63,7 +63,7 @@
             assert l == [1,2]
 
     """)
-    result = testdir.runpytest_inprocess(p, '-p', 'nose')
+    result = testdir.runpytest(p, '-p', 'nose')
     result.assert_outcomes(passed=2)
 
 
@@ -85,7 +85,7 @@
             assert l == [1,2]
 
     """)
-    result = testdir.runpytest_inprocess(p, '-p', 'nose')
+    result = testdir.runpytest(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*TypeError: <lambda>()*"
     ])
@@ -136,7 +136,7 @@
         test_hello.setup = my_setup_partial
         test_hello.teardown = my_teardown_partial
     """)
-    result = testdir.runpytest_inprocess(p, '-p', 'nose')
+    result = testdir.runpytest(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*"
     ])
@@ -203,7 +203,7 @@
                 #expect.append('setup')
                 eq_(self.called, expect)
     """)
-    result = testdir.runpytest_inprocess(p, '-p', 'nose')
+    result = testdir.runpytest(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*10 passed*"
     ])
@@ -234,7 +234,7 @@
             assert items[2] == 2
             assert 1 not in items
     """)
-    result = testdir.runpytest_inprocess('-p', 'nose')
+    result = testdir.runpytest('-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*",
     ])
@@ -256,7 +256,7 @@
         def test_world():
             assert l == [1]
         """)
-    result = testdir.runpytest_inprocess('-p', 'nose')
+    result = testdir.runpytest('-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*",
     ])
@@ -272,7 +272,7 @@
             def test_first(self):
                 pass
         """)
-    result = testdir.runpytest_inprocess()
+    result = testdir.runpytest()
     result.stdout.fnmatch_lines([
         "*1 passed*",
     ])
@@ -297,7 +297,7 @@
             def test_fun(self):
                 pass
         """)
-    result = testdir.runpytest_inprocess()
+    result = testdir.runpytest()
     result.assert_outcomes(passed=1)
 
 @pytest.mark.skipif("sys.version_info < (2,6)")
@@ -323,7 +323,7 @@
                 """Undoes the setup."""
                 raise Exception("should not call teardown for skipped tests")
         ''')
-    reprec = testdir.runpytest_inprocess()
+    reprec = testdir.runpytest()
     reprec.assert_outcomes(passed=1, skipped=1)
 
 
@@ -334,7 +334,7 @@
         def test_failing():
             assert False
         """)
-    result = testdir.runpytest_inprocess(p)
+    result = testdir.runpytest(p)
     result.assert_outcomes(skipped=1)
 
 


https://bitbucket.org/pytest-dev/pytest/commits/7d4a0b78d19b/
Changeset:   7d4a0b78d19b
User:        hpk42
Date:        2015-04-29 14:32:28+00:00
Summary:     Merged in hpk42/pytest-patches/testrefactor (pull request #284)

majorly refactor pytester and speed/streamline  tests
Affected #:  25 files

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -43,6 +43,14 @@
   implementations.  Use the ``hookwrapper`` mechanism instead already 
   introduced with pytest-2.7.
 
+- speed up pytest's own test suite considerably by using inprocess
+  tests by default (testrun can be modified with --runpytest=subprocess
+  to create subprocesses in many places instead).  The main
+  APIs to run pytest in a test are "runpytest()" or "runpytest_subprocess"
+  and "runpytest_inprocess" if you need a particular way of running
+  the test.  In all cases you get back a RunResult but the inprocess
+  one will also have a "reprec" attribute with the recorded events/reports.
+
  
 2.7.1.dev (compared to 2.7.0)
 -----------------------------

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c _pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -29,17 +29,24 @@
                   initialization.
     """
     try:
-        config = _prepareconfig(args, plugins)
-    except ConftestImportFailure:
-        e = sys.exc_info()[1]
-        tw = py.io.TerminalWriter(sys.stderr)
-        for line in traceback.format_exception(*e.excinfo):
-            tw.line(line.rstrip(), red=True)
-        tw.line("ERROR: could not load %s\n" % (e.path), red=True)
+        try:
+            config = _prepareconfig(args, plugins)
+        except ConftestImportFailure as e:
+            tw = py.io.TerminalWriter(sys.stderr)
+            for line in traceback.format_exception(*e.excinfo):
+                tw.line(line.rstrip(), red=True)
+            tw.line("ERROR: could not load %s\n" % (e.path), red=True)
+            return 4
+        else:
+            try:
+                config.pluginmanager.check_pending()
+                return config.hook.pytest_cmdline_main(config=config)
+            finally:
+                config._ensure_unconfigure()
+    except UsageError as e:
+        for msg in e.args:
+            sys.stderr.write("ERROR: %s\n" %(msg,))
         return 4
-    else:
-        config.pluginmanager.check_pending()
-        return config.hook.pytest_cmdline_main(config=config)
 
 class cmdline:  # compatibility namespace
     main = staticmethod(main)
@@ -81,12 +88,18 @@
         if not isinstance(args, str):
             raise ValueError("not a string or argument list: %r" % (args,))
         args = shlex.split(args)
-    pluginmanager = get_config().pluginmanager
-    if plugins:
-        for plugin in plugins:
-            pluginmanager.register(plugin)
-    return pluginmanager.hook.pytest_cmdline_parse(
-            pluginmanager=pluginmanager, args=args)
+    config = get_config()
+    pluginmanager = config.pluginmanager
+    try:
+        if plugins:
+            for plugin in plugins:
+                pluginmanager.register(plugin)
+        return pluginmanager.hook.pytest_cmdline_parse(
+                pluginmanager=pluginmanager, args=args)
+    except BaseException:
+        config._ensure_unconfigure()
+        raise
+
 
 def exclude_pytest_names(name):
     return not name.startswith(name) or name == "pytest_plugins" or \
@@ -259,7 +272,10 @@
 
     def consider_pluginarg(self, arg):
         if arg.startswith("no:"):
-            self.set_blocked(arg[3:])
+            name = arg[3:]
+            self.set_blocked(name)
+            if not name.startswith("pytest_"):
+                self.set_blocked("pytest_" + name)
         else:
             self.import_plugin(arg)
 

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c _pytest/main.py
--- a/_pytest/main.py
+++ b/_pytest/main.py
@@ -83,10 +83,7 @@
             initstate = 2
             doit(config, session)
         except pytest.UsageError:
-            args = sys.exc_info()[1].args
-            for msg in args:
-                sys.stderr.write("ERROR: %s\n" %(msg,))
-            session.exitstatus = EXIT_USAGEERROR
+            raise
         except KeyboardInterrupt:
             excinfo = py.code.ExceptionInfo()
             config.hook.pytest_keyboard_interrupt(excinfo=excinfo)

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c _pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -1,5 +1,7 @@
 """ (disabled by default) support for testing pytest and pytest plugins. """
+import gc
 import sys
+import traceback
 import os
 import codecs
 import re
@@ -15,6 +17,136 @@
 
 from _pytest.main import Session, EXIT_OK
 
+
+def pytest_addoption(parser):
+    # group = parser.getgroup("pytester", "pytester (self-tests) options")
+    parser.addoption('--lsof',
+           action="store_true", dest="lsof", default=False,
+           help=("run FD checks if lsof is available"))
+
+    parser.addoption('--runpytest', default="inprocess", dest="runpytest",
+           choices=("inprocess", "subprocess", ),
+           help=("run pytest sub runs in tests using an 'inprocess' "
+                 "or 'subprocess' (python -m main) method"))
+
+
+def pytest_configure(config):
+    # This might be called multiple times. Only take the first.
+    global _pytest_fullpath
+    try:
+        _pytest_fullpath
+    except NameError:
+        _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
+
+    if config.getvalue("lsof"):
+        checker = LsofFdLeakChecker()
+        if checker.matching_platform():
+            config.pluginmanager.register(checker)
+
+
+class LsofFdLeakChecker(object):
+    def get_open_files(self):
+        out = self._exec_lsof()
+        open_files = self._parse_lsof_output(out)
+        return open_files
+
+    def _exec_lsof(self):
+        pid = os.getpid()
+        return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
+
+    def _parse_lsof_output(self, out):
+        def isopen(line):
+            return line.startswith('f') and ("deleted" not in line and
+                'mem' not in line and "txt" not in line and 'cwd' not in line)
+
+        open_files = []
+
+        for line in out.split("\n"):
+            if isopen(line):
+                fields = line.split('\0')
+                fd = fields[0][1:]
+                filename = fields[1][1:]
+                if filename.startswith('/'):
+                    open_files.append((fd, filename))
+
+        return open_files
+
+    def matching_platform(self):
+        try:
+            py.process.cmdexec("lsof -v")
+        except py.process.cmdexec.Error:
+            return False
+        else:
+            return True
+
+    @pytest.hookimpl_opts(hookwrapper=True, tryfirst=True)
+    def pytest_runtest_item(self, item):
+        lines1 = self.get_open_files()
+        yield
+        if hasattr(sys, "pypy_version_info"):
+            gc.collect()
+        lines2 = self.get_open_files()
+
+        new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
+        leaked_files = [t for t in lines2 if t[0] in new_fds]
+        if leaked_files:
+            error = []
+            error.append("***** %s FD leakage detected" % len(leaked_files))
+            error.extend([str(f) for f in leaked_files])
+            error.append("*** Before:")
+            error.extend([str(f) for f in lines1])
+            error.append("*** After:")
+            error.extend([str(f) for f in lines2])
+            error.append(error[0])
+            error.append("*** function %s:%s: %s " % item.location)
+            pytest.fail("\n".join(error), pytrace=False)
+
+
+# XXX copied from execnet's conftest.py - needs to be merged
+winpymap = {
+    'python2.7': r'C:\Python27\python.exe',
+    'python2.6': r'C:\Python26\python.exe',
+    'python3.1': r'C:\Python31\python.exe',
+    'python3.2': r'C:\Python32\python.exe',
+    'python3.3': r'C:\Python33\python.exe',
+    'python3.4': r'C:\Python34\python.exe',
+    'python3.5': r'C:\Python35\python.exe',
+}
+
+def getexecutable(name, cache={}):
+    try:
+        return cache[name]
+    except KeyError:
+        executable = py.path.local.sysfind(name)
+        if executable:
+            if name == "jython":
+                import subprocess
+                popen = subprocess.Popen([str(executable), "--version"],
+                    universal_newlines=True, stderr=subprocess.PIPE)
+                out, err = popen.communicate()
+                if not err or "2.5" not in err:
+                    executable = None
+                if "2.5.2" in err:
+                    executable = None # http://bugs.jython.org/issue1790
+        cache[name] = executable
+        return executable
+
+ at pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
+                        'pypy', 'pypy3'])
+def anypython(request):
+    name = request.param
+    executable = getexecutable(name)
+    if executable is None:
+        if sys.platform == "win32":
+            executable = winpymap.get(name, None)
+            if executable:
+                executable = py.path.local(executable)
+                if executable.check():
+                    return executable
+        pytest.skip("no suitable %s found" % (name,))
+    return executable
+
 # used at least by pytest-xdist plugin
 @pytest.fixture
 def _pytest(request):
@@ -39,23 +171,6 @@
     return [x for x in l if x[0] != "_"]
 
 
-def pytest_addoption(parser):
-    group = parser.getgroup("pylib")
-    group.addoption('--no-tools-on-path',
-           action="store_true", dest="notoolsonpath", default=False,
-           help=("discover tools on PATH instead of going through py.cmdline.")
-    )
-
-def pytest_configure(config):
-    # This might be called multiple times. Only take the first.
-    global _pytest_fullpath
-    try:
-        _pytest_fullpath
-    except NameError:
-        _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
-        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
-
-
 class ParsedCall:
     def __init__(self, name, kwargs):
         self.__dict__.update(kwargs)
@@ -201,9 +316,11 @@
     return LineMatcher
 
 def pytest_funcarg__testdir(request):
-    tmptestdir = TmpTestdir(request)
+    tmptestdir = Testdir(request)
     return tmptestdir
 
+
+
 rex_outcome = re.compile("(\d+) (\w+)")
 class RunResult:
     """The result of running a command.
@@ -213,10 +330,10 @@
     :ret: The return value.
     :outlines: List of lines captured from stdout.
     :errlines: List of lines captured from stderr.
-    :stdout: LineMatcher of stdout, use ``stdout.str()`` to
+    :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
        reconstruct stdout or the commonly used
        ``stdout.fnmatch_lines()`` method.
-    :stderrr: LineMatcher of stderr.
+    :stderr: :py:class:`LineMatcher` of stderr.
     :duration: Duration in seconds.
 
     """
@@ -229,6 +346,8 @@
         self.duration = duration
 
     def parseoutcomes(self):
+        """ Return a dictionary of outcomestring->num from parsing
+        the terminal output that the test process produced."""
         for line in reversed(self.outlines):
             if 'seconds' in line:
                 outcomes = rex_outcome.findall(line)
@@ -238,14 +357,17 @@
                         d[cat] = int(num)
                     return d
 
-    def assertoutcome(self, passed=0, skipped=0, failed=0):
+    def assert_outcomes(self, passed=0, skipped=0, failed=0):
+        """ assert that the specified outcomes appear with the respective
+        numbers (0 means it didn't occur) in the text output from a test run."""
         d = self.parseoutcomes()
         assert passed == d.get("passed", 0)
         assert skipped == d.get("skipped", 0)
         assert failed == d.get("failed", 0)
 
 
-class TmpTestdir:
+
+class Testdir:
     """Temporary test directory with tools to test/run py.test itself.
 
     This is based on the ``tmpdir`` fixture but provides a number of
@@ -268,7 +390,6 @@
 
     def __init__(self, request):
         self.request = request
-        self.Config = request.config.__class__
         # XXX remove duplication with tmpdir plugin
         basetmp = request.config._tmpdirhandler.ensuretemp("testdir")
         name = request.function.__name__
@@ -280,12 +401,18 @@
             break
         self.tmpdir = tmpdir
         self.plugins = []
-        self._savesyspath = list(sys.path)
+        self._savesyspath = (list(sys.path), list(sys.meta_path))
+        self._savemodulekeys = set(sys.modules)
         self.chdir() # always chdir
         self.request.addfinalizer(self.finalize)
+        method = self.request.config.getoption("--runpytest")
+        if method == "inprocess":
+            self._runpytest_method = self.runpytest_inprocess
+        elif method == "subprocess":
+            self._runpytest_method = self.runpytest_subprocess
 
     def __repr__(self):
-        return "<TmpTestdir %r>" % (self.tmpdir,)
+        return "<Testdir %r>" % (self.tmpdir,)
 
     def finalize(self):
         """Clean up global state artifacts.
@@ -296,23 +423,22 @@
         has finished.
 
         """
-        sys.path[:] = self._savesyspath
+        sys.path[:], sys.meta_path[:] = self._savesyspath
         if hasattr(self, '_olddir'):
             self._olddir.chdir()
         self.delete_loaded_modules()
 
     def delete_loaded_modules(self):
-        """Delete modules that have been loaded from tmpdir.
+        """Delete modules that have been loaded during a test.
 
         This allows the interpreter to catch module changes in case
         the module is re-imported.
-
         """
-        for name, mod in list(sys.modules.items()):
-            if mod:
-                fn = getattr(mod, '__file__', None)
-                if fn and fn.startswith(str(self.tmpdir)):
-                    del sys.modules[name]
+        for name in set(sys.modules).difference(self._savemodulekeys):
+            # it seems zope.interfaces is keeping some state
+            # (used by twisted related tests)
+            if name != "zope.interface":
+                del sys.modules[name]
 
     def make_hook_recorder(self, pluginmanager):
         """Create a new :py:class:`HookRecorder` for a PluginManager."""
@@ -503,43 +629,19 @@
         l = list(cmdlineargs) + [p]
         return self.inline_run(*l)
 
-    def inline_runsource1(self, *args):
-        """Run a test module in process using ``pytest.main()``.
-
-        This behaves exactly like :py:meth:`inline_runsource` and
-        takes identical arguments.  However the return value is a list
-        of the reports created by the pytest_runtest_logreport hook
-        during the run.
-
-        """
-        args = list(args)
-        source = args.pop()
-        p = self.makepyfile(source)
-        l = list(args) + [p]
-        reprec = self.inline_run(*l)
-        reports = reprec.getreports("pytest_runtest_logreport")
-        assert len(reports) == 3, reports # setup/call/teardown
-        return reports[1]
-
     def inline_genitems(self, *args):
         """Run ``pytest.main(['--collectonly'])`` in-process.
 
         Returns a tuple of the collected items and a
         :py:class:`HookRecorder` instance.
 
-        """
-        return self.inprocess_run(list(args) + ['--collectonly'])
-
-    def inprocess_run(self, args, plugins=()):
-        """Run ``pytest.main()`` in-process, return Items and a HookRecorder.
-
         This runs the :py:func:`pytest.main` function to run all of
         py.test inside the test process itself like
         :py:meth:`inline_run`.  However the return value is a tuple of
         the collection items and a :py:class:`HookRecorder` instance.
 
         """
-        rec = self.inline_run(*args, plugins=plugins)
+        rec = self.inline_run("--collect-only", *args)
         items = [x.item for x in rec.getcalls("pytest_itemcollected")]
         return items, rec
 
@@ -568,12 +670,50 @@
         plugins = kwargs.get("plugins") or []
         plugins.append(Collect())
         ret = pytest.main(list(args), plugins=plugins)
-        assert len(rec) == 1
-        reprec = rec[0]
+        self.delete_loaded_modules()
+        if len(rec) == 1:
+            reprec = rec.pop()
+        else:
+            class reprec:
+                pass
         reprec.ret = ret
-        self.delete_loaded_modules()
         return reprec
 
+    def runpytest_inprocess(self, *args, **kwargs):
+        """ Return result of running pytest in-process, providing a similar
+        interface to what self.runpytest() provides. """
+        if kwargs.get("syspathinsert"):
+            self.syspathinsert()
+        now = time.time()
+        capture = py.io.StdCapture()
+        try:
+            try:
+                reprec = self.inline_run(*args)
+            except SystemExit as e:
+                class reprec:
+                    ret = e.args[0]
+            except Exception:
+                traceback.print_exc()
+                class reprec:
+                    ret = 3
+        finally:
+            out, err = capture.reset()
+            sys.stdout.write(out)
+            sys.stderr.write(err)
+
+        res = RunResult(reprec.ret,
+                        out.split("\n"), err.split("\n"),
+                        time.time()-now)
+        res.reprec = reprec
+        return res
+
+    def runpytest(self, *args, **kwargs):
+        """ Run pytest inline or in a subprocess, depending on the command line
+        option "--runpytest" and return a :py:class:`RunResult`.
+
+        """
+        return self._runpytest_method(*args, **kwargs)
+
     def parseconfig(self, *args):
         """Return a new py.test Config instance from given commandline args.
 
@@ -745,57 +885,23 @@
         except UnicodeEncodeError:
             print("couldn't print to %s because of encoding" % (fp,))
 
-    def runpybin(self, scriptname, *args):
-        """Run a py.* tool with arguments.
+    def _getpytestargs(self):
+        # we cannot use "(sys.executable,script)"
+        # because on windows the script is e.g. a py.test.exe
+        return (sys.executable, _pytest_fullpath,) # noqa
 
-        This can realy only be used to run py.test, you probably want
-            :py:meth:`runpytest` instead.
+    def runpython(self, script):
+        """Run a python script using sys.executable as interpreter.
 
         Returns a :py:class:`RunResult`.
-
         """
-        fullargs = self._getpybinargs(scriptname) + args
-        return self.run(*fullargs)
-
-    def _getpybinargs(self, scriptname):
-        if not self.request.config.getvalue("notoolsonpath"):
-            # XXX we rely on script referring to the correct environment
-            # we cannot use "(sys.executable,script)"
-            # because on windows the script is e.g. a py.test.exe
-            return (sys.executable, _pytest_fullpath,) # noqa
-        else:
-            pytest.skip("cannot run %r with --no-tools-on-path" % scriptname)
-
-    def runpython(self, script, prepend=True):
-        """Run a python script.
-
-        If ``prepend`` is True then the directory from which the py
-        package has been imported will be prepended to sys.path.
-
-        Returns a :py:class:`RunResult`.
-
-        """
-        # XXX The prepend feature is probably not very useful since the
-        #     split of py and pytest.
-        if prepend:
-            s = self._getsysprepend()
-            if s:
-                script.write(s + "\n" + script.read())
         return self.run(sys.executable, script)
 
-    def _getsysprepend(self):
-        if self.request.config.getvalue("notoolsonpath"):
-            s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath())
-        else:
-            s = ""
-        return s
-
     def runpython_c(self, command):
         """Run python -c "command", return a :py:class:`RunResult`."""
-        command = self._getsysprepend() + command
         return self.run(sys.executable, "-c", command)
 
-    def runpytest(self, *args):
+    def runpytest_subprocess(self, *args, **kwargs):
         """Run py.test as a subprocess with given arguments.
 
         Any plugins added to the :py:attr:`plugins` list will added
@@ -820,7 +926,8 @@
         plugins = [x for x in self.plugins if isinstance(x, str)]
         if plugins:
             args = ('-p', plugins[0]) + args
-        return self.runpybin("py.test", *args)
+        args = self._getpytestargs() + args
+        return self.run(*args)
 
     def spawn_pytest(self, string, expect_timeout=10.0):
         """Run py.test using pexpect.
@@ -831,10 +938,8 @@
         The pexpect child is returned.
 
         """
-        if self.request.config.getvalue("notoolsonpath"):
-            pytest.skip("--no-tools-on-path prevents running pexpect-spawn tests")
         basetemp = self.tmpdir.mkdir("pexpect")
-        invoke = " ".join(map(str, self._getpybinargs("py.test")))
+        invoke = " ".join(map(str, self._getpytestargs()))
         cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
         return self.spawn(cmd, expect_timeout=expect_timeout)
 

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c doc/en/example/assertion/test_failures.py
--- a/doc/en/example/assertion/test_failures.py
+++ b/doc/en/example/assertion/test_failures.py
@@ -7,7 +7,7 @@
     target = testdir.tmpdir.join(failure_demo.basename)
     failure_demo.copy(target)
     failure_demo.copy(testdir.tmpdir.join(failure_demo.basename))
-    result = testdir.runpytest(target)
+    result = testdir.runpytest(target, syspathinsert=True)
     result.stdout.fnmatch_lines([
         "*42 failed*"
     ])

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c doc/en/writing_plugins.txt
--- a/doc/en/writing_plugins.txt
+++ b/doc/en/writing_plugins.txt
@@ -186,12 +186,44 @@
 If you want to look at the names of existing plugins, use
 the ``--traceconfig`` option.
 
+Testing plugins
+---------------
+
+pytest comes with some facilities that you can enable for testing your
+plugin.  Given that you have an installed plugin you can enable the
+:py:class:`testdir <_pytest.pytester.Testdir>` fixture via specifying a
+command line option to include the pytester plugin (``-p pytester``) or
+by putting ``pytest_plugins = pytester`` into your test or
+``conftest.py`` file.  You then will have a ``testdir`` fixture which you
+can use like this::
+
+    # content of test_myplugin.py
+
+    pytest_plugins = pytester  # to get testdir fixture
+
+    def test_myplugin(testdir):
+        testdir.makepyfile("""
+            def test_example():
+                pass
+        """)
+        result = testdir.runpytest("--verbose")
+        result.fnmatch_lines("""
+            test_example*
+        """)
+
+Note that by default ``testdir.runpytest()`` will perform a pytest
+in-process.  You can pass the command line option ``--runpytest=subprocess``
+to have it happen in a subprocess.
+
+Also see the :py:class:`RunResult <_pytest.pytester.RunResult>` for more
+methods of the result object that you get from a call to ``runpytest``.
 
 .. _`writinghooks`:
 
 Writing hook functions
 ======================
 
+
 .. _validation:
 
 hook function validation and execution
@@ -493,3 +525,13 @@
 .. autoclass:: _pytest.core.CallOutcome()
     :members:
 
+.. currentmodule:: _pytest.pytester
+
+.. autoclass:: Testdir()
+    :members: runpytest,runpytest_subprocess,runpytest_inprocess,makeconftest,makepyfile
+
+.. autoclass:: RunResult()
+    :members:
+
+.. autoclass:: LineMatcher()
+    :members:

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/acceptance_test.py
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -82,7 +82,7 @@
             def test_option(pytestconfig):
                 assert pytestconfig.option.xyz == "123"
         """)
-        result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123")
+        result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
         assert result.ret == 0
         result.stdout.fnmatch_lines([
             '*1 passed*',
@@ -203,7 +203,7 @@
             os.chdir(os.path.dirname(os.getcwd()))
             print (py.log)
         """))
-        result = testdir.runpython(p, prepend=False)
+        result = testdir.runpython(p)
         assert not result.ret
 
     def test_issue109_sibling_conftests_not_loaded(self, testdir):
@@ -353,7 +353,8 @@
             *unrecognized*
         """)
 
-    def test_getsourcelines_error_issue553(self, testdir):
+    def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
+        monkeypatch.setattr("inspect.getsourcelines", None)
         p = testdir.makepyfile("""
             def raise_error(obj):
                 raise IOError('source code not available')

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/conftest.py
--- a/testing/conftest.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import pytest
-import sys
-
-pytest_plugins = "pytester",
-
-import os, py
-
-class LsofFdLeakChecker(object):
-    def get_open_files(self):
-        out = self._exec_lsof()
-        open_files = self._parse_lsof_output(out)
-        return open_files
-
-    def _exec_lsof(self):
-        pid = os.getpid()
-        return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
-
-    def _parse_lsof_output(self, out):
-        def isopen(line):
-            return line.startswith('f') and (
-                "deleted" not in line and 'mem' not in line and "txt" not in line and 'cwd' not in line)
-
-        open_files = []
-
-        for line in out.split("\n"):
-            if isopen(line):
-                fields = line.split('\0')
-                fd = fields[0][1:]
-                filename = fields[1][1:]
-                if filename.startswith('/'):
-                    open_files.append((fd, filename))
-
-        return open_files
-
-
-def pytest_addoption(parser):
-    parser.addoption('--lsof',
-           action="store_true", dest="lsof", default=False,
-           help=("run FD checks if lsof is available"))
-
-def pytest_runtest_setup(item):
-    config = item.config
-    config._basedir = py.path.local()
-    if config.getvalue("lsof"):
-        try:
-            config._fd_leak_checker = LsofFdLeakChecker()
-            config._openfiles = config._fd_leak_checker.get_open_files()
-        except py.process.cmdexec.Error:
-            pass
-
-#def pytest_report_header():
-#    return "pid: %s" % os.getpid()
-
-def check_open_files(config):
-    lines2 = config._fd_leak_checker.get_open_files()
-    new_fds = set([t[0] for t in lines2]) - set([t[0] for t in config._openfiles])
-    open_files = [t for t in lines2 if t[0] in new_fds]
-    if open_files:
-        error = []
-        error.append("***** %s FD leakage detected" % len(open_files))
-        error.extend([str(f) for f in open_files])
-        error.append("*** Before:")
-        error.extend([str(f) for f in config._openfiles])
-        error.append("*** After:")
-        error.extend([str(f) for f in lines2])
-        error.append(error[0])
-        raise AssertionError("\n".join(error))
-
- at pytest.hookimpl_opts(hookwrapper=True, trylast=True)
-def pytest_runtest_teardown(item):
-    yield
-    item.config._basedir.chdir()
-    if hasattr(item.config, '_openfiles'):
-        check_open_files(item.config)
-
-# XXX copied from execnet's conftest.py - needs to be merged
-winpymap = {
-    'python2.7': r'C:\Python27\python.exe',
-    'python2.6': r'C:\Python26\python.exe',
-    'python3.1': r'C:\Python31\python.exe',
-    'python3.2': r'C:\Python32\python.exe',
-    'python3.3': r'C:\Python33\python.exe',
-    'python3.4': r'C:\Python34\python.exe',
-    'python3.5': r'C:\Python35\python.exe',
-}
-
-def getexecutable(name, cache={}):
-    try:
-        return cache[name]
-    except KeyError:
-        executable = py.path.local.sysfind(name)
-        if executable:
-            if name == "jython":
-                import subprocess
-                popen = subprocess.Popen([str(executable), "--version"],
-                    universal_newlines=True, stderr=subprocess.PIPE)
-                out, err = popen.communicate()
-                if not err or "2.5" not in err:
-                    executable = None
-                if "2.5.2" in err:
-                    executable = None # http://bugs.jython.org/issue1790
-        cache[name] = executable
-        return executable
-
- at pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
-                        'pypy', 'pypy3'])
-def anypython(request):
-    name = request.param
-    executable = getexecutable(name)
-    if executable is None:
-        if sys.platform == "win32":
-            executable = winpymap.get(name, None)
-            if executable:
-                executable = py.path.local(executable)
-                if executable.check():
-                    return executable
-        pytest.skip("no suitable %s found" % (name,))
-    return executable

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/python/collect.py
--- a/testing/python/collect.py
+++ b/testing/python/collect.py
@@ -627,9 +627,7 @@
     sub1.join("test_in_sub1.py").write("def test_1(): pass")
     sub2.join("test_in_sub2.py").write("def test_2(): pass")
     result = testdir.runpytest("-v", "-s")
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
+    result.assert_outcomes(passed=2)
 
 def test_modulecol_roundtrip(testdir):
     modcol = testdir.getmodulecol("pass", withinit=True)

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/python/fixture.py
--- a/testing/python/fixture.py
+++ b/testing/python/fixture.py
@@ -100,9 +100,7 @@
         sub1.join("test_in_sub1.py").write("def test_1(arg1): pass")
         sub2.join("test_in_sub2.py").write("def test_2(arg2): pass")
         result = testdir.runpytest("-v")
-        result.stdout.fnmatch_lines([
-            "*2 passed*"
-        ])
+        result.assert_outcomes(passed=2)
 
     def test_extend_fixture_module_class(self, testdir):
         testfile = testdir.makepyfile("""

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/python/metafunc.py
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -292,9 +292,7 @@
         """)
         result = testdir.runpytest()
         assert result.ret == 1
-        result.stdout.fnmatch_lines([
-            "*6 fail*",
-        ])
+        result.assert_outcomes(failed=6)
 
     def test_parametrize_CSV(self, testdir):
         testdir.makepyfile("""
@@ -375,7 +373,7 @@
                     assert metafunc.cls == TestClass
         """)
         result = testdir.runpytest(p, "-v")
-        result.assertoutcome(passed=2)
+        result.assert_outcomes(passed=2)
 
     def test_addcall_with_two_funcargs_generators(self, testdir):
         testdir.makeconftest("""
@@ -430,9 +428,7 @@
                     pass
         """)
         result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 pass*",
-        ])
+        result.assert_outcomes(passed=1)
 
 
     def test_generate_plugin_and_module(self, testdir):
@@ -506,9 +502,7 @@
                     self.val = 1
             """)
         result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines([
-            "*1 pass*",
-        ])
+        result.assert_outcomes(passed=1)
 
     def test_parametrize_functional2(self, testdir):
         testdir.makepyfile("""
@@ -653,8 +647,8 @@
             def test_function():
                 pass
         """)
-        reprec = testdir.inline_run()
-        reprec.assertoutcome(passed=1)
+        reprec = testdir.runpytest()
+        reprec.assert_outcomes(passed=1)
 
     def test_generate_tests_only_done_in_subdir(self, testdir):
         sub1 = testdir.mkpydir("sub1")
@@ -670,9 +664,7 @@
         sub1.join("test_in_sub1.py").write("def test_1(): pass")
         sub2.join("test_in_sub2.py").write("def test_2(): pass")
         result = testdir.runpytest("-v", "-s", sub1, sub2, sub1)
-        result.stdout.fnmatch_lines([
-            "*3 passed*"
-        ])
+        result.assert_outcomes(passed=3)
 
     def test_generate_same_function_names_issue403(self, testdir):
         testdir.makepyfile("""
@@ -687,8 +679,8 @@
             test_x = make_tests()
             test_y = make_tests()
         """)
-        reprec = testdir.inline_run()
-        reprec.assertoutcome(passed=4)
+        reprec = testdir.runpytest()
+        reprec.assert_outcomes(passed=4)
 
     @pytest.mark.issue463
     def test_parameterize_misspelling(self, testdir):

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_assertion.py
--- a/testing/test_assertion.py
+++ b/testing/test_assertion.py
@@ -461,7 +461,7 @@
                    ("--assert=plain", "--nomagic"),
                    ("--assert=plain", "--no-assert", "--nomagic"))
     for opt in off_options:
-        result = testdir.runpytest(*opt)
+        result = testdir.runpytest_subprocess(*opt)
         assert "3 == 4" not in result.stdout.str()
 
 def test_old_assert_mode(testdir):
@@ -469,7 +469,7 @@
         def test_in_old_mode():
             assert "@py_builtins" not in globals()
     """)
-    result = testdir.runpytest("--assert=reinterp")
+    result = testdir.runpytest_subprocess("--assert=reinterp")
     assert result.ret == 0
 
 def test_triple_quoted_string_issue113(testdir):

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_assertrewrite.py
--- a/testing/test_assertrewrite.py
+++ b/testing/test_assertrewrite.py
@@ -453,7 +453,7 @@
                 assert not os.path.exists(__cached__)
                 assert not os.path.exists(os.path.dirname(__cached__))""")
         monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
-        assert testdir.runpytest().ret == 0
+        assert testdir.runpytest_subprocess().ret == 0
 
     @pytest.mark.skipif('"__pypy__" in sys.modules')
     def test_pyc_vs_pyo(self, testdir, monkeypatch):
@@ -468,12 +468,12 @@
         tmp = "--basetemp=%s" % p
         monkeypatch.setenv("PYTHONOPTIMIZE", "2")
         monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
-        assert testdir.runpybin("py.test", tmp).ret == 0
+        assert testdir.runpytest_subprocess(tmp).ret == 0
         tagged = "test_pyc_vs_pyo." + PYTEST_TAG
         assert tagged + ".pyo" in os.listdir("__pycache__")
         monkeypatch.undo()
         monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
-        assert testdir.runpybin("py.test", tmp).ret == 1
+        assert testdir.runpytest_subprocess(tmp).ret == 1
         assert tagged + ".pyc" in os.listdir("__pycache__")
 
     def test_package(self, testdir):
@@ -615,10 +615,8 @@
         testdir.makepyfile(**contents)
         testdir.maketxtfile(**{'testpkg/resource': "Load me please."})
 
-        result = testdir.runpytest()
-        result.stdout.fnmatch_lines([
-            '* 1 passed*',
-        ])
+        result = testdir.runpytest_subprocess()
+        result.assert_outcomes(passed=1)
 
     def test_read_pyc(self, tmpdir):
         """

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_capture.py
--- a/testing/test_capture.py
+++ b/testing/test_capture.py
@@ -282,7 +282,7 @@
                 logging.basicConfig(stream=stream)
                 stream.close() # to free memory/release resources
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stderr.str().find("atexit") == -1
 
     def test_logging_and_immediate_setupteardown(self, testdir):
@@ -301,7 +301,7 @@
         """)
         for optargs in (('--capture=sys',), ('--capture=fd',)):
             print (optargs)
-            result = testdir.runpytest(p, *optargs)
+            result = testdir.runpytest_subprocess(p, *optargs)
             s = result.stdout.str()
             result.stdout.fnmatch_lines([
                 "*WARN*hello3",  # errors show first!
@@ -327,7 +327,7 @@
         """)
         for optargs in (('--capture=sys',), ('--capture=fd',)):
             print (optargs)
-            result = testdir.runpytest(p, *optargs)
+            result = testdir.runpytest_subprocess(p, *optargs)
             s = result.stdout.str()
             result.stdout.fnmatch_lines([
                 "*WARN*hello3",  # errors come first
@@ -348,7 +348,7 @@
                 logging.warn("hello432")
                 assert 0
         """)
-        result = testdir.runpytest(
+        result = testdir.runpytest_subprocess(
             p, "--traceconfig",
             "-p", "no:capturelog")
         assert result.ret != 0
@@ -364,7 +364,7 @@
                 logging.warn("hello435")
         """)
         # make sure that logging is still captured in tests
-        result = testdir.runpytest("-s", "-p", "no:capturelog")
+        result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
         assert result.ret == 0
         result.stderr.fnmatch_lines([
             "WARNING*hello435*",
@@ -383,7 +383,7 @@
                 logging.warn("hello433")
                 assert 0
         """)
-        result = testdir.runpytest(p, "-p", "no:capturelog")
+        result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
         assert result.ret != 0
         result.stdout.fnmatch_lines([
             "WARNING*hello433*",
@@ -461,7 +461,7 @@
                 os.write(1, str(42).encode('ascii'))
                 raise KeyboardInterrupt()
         """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         result.stdout.fnmatch_lines([
             "*KeyboardInterrupt*"
         ])
@@ -474,7 +474,7 @@
             def test_log(capsys):
                 logging.error('x')
             """)
-        result = testdir.runpytest(p)
+        result = testdir.runpytest_subprocess(p)
         assert 'closed' not in result.stderr.str()
 
 
@@ -500,7 +500,7 @@
         def test_hello(capfd):
             pass
     """)
-    result = testdir.runpytest("--capture=no")
+    result = testdir.runpytest_subprocess("--capture=no")
     result.stdout.fnmatch_lines([
         "*1 skipped*"
     ])
@@ -563,9 +563,7 @@
             test_foo()
         """)
     result = testdir.runpytest('--assert=plain')
-    result.stdout.fnmatch_lines([
-        '*2 passed*',
-    ])
+    result.assert_outcomes(passed=2)
 
 
 class TestTextIO:
@@ -885,7 +883,7 @@
                 os.write(1, "hello\\n".encode("ascii"))
                 assert 0
         """)
-        result = testdir.runpytest()
+        result = testdir.runpytest_subprocess()
         result.stdout.fnmatch_lines("""
             *test_x*
             *assert 0*
@@ -936,7 +934,7 @@
                 cap = StdCaptureFD(out=False, err=False, in_=True)
                 cap.stop_capturing()
         """)
-        result = testdir.runpytest("--capture=fd")
+        result = testdir.runpytest_subprocess("--capture=fd")
         assert result.ret == 0
         assert result.parseoutcomes()['passed'] == 3
 
@@ -971,7 +969,7 @@
             os.write(1, b"hello\\n")
             assert 0
     """)
-    result = testdir.runpytest()
+    result = testdir.runpytest_subprocess()
     result.stdout.fnmatch_lines("""
         *test_capture_again*
         *assert 0*

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_collection.py
--- a/testing/test_collection.py
+++ b/testing/test_collection.py
@@ -296,7 +296,6 @@
         subdir.ensure("__init__.py")
         target = subdir.join(p.basename)
         p.move(target)
-        testdir.chdir()
         subdir.chdir()
         config = testdir.parseconfig(p.basename)
         rcol = Session(config=config)
@@ -313,7 +312,7 @@
     def test_collect_topdir(self, testdir):
         p = testdir.makepyfile("def test_func(): pass")
         id = "::".join([p.basename, "test_func"])
-        # XXX migrate to inline_genitems? (see below)
+        # XXX migrate to collectonly? (see below)
         config = testdir.parseconfig(id)
         topdir = testdir.tmpdir
         rcol = Session(config)
@@ -470,7 +469,6 @@
             assert col.config is config
 
     def test_pkgfile(self, testdir):
-        testdir.chdir()
         tmpdir = testdir.tmpdir
         subdir = tmpdir.join("subdir")
         x = subdir.ensure("x.py")

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_config.py
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -75,7 +75,7 @@
             [pytest]
             addopts = --qwe
         """)
-        result = testdir.runpytest("--confcutdir=.")
+        result = testdir.inline_run("--confcutdir=.")
         assert result.ret == 0
 
 class TestConfigCmdlineParsing:

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_core.py
--- a/testing/test_core.py
+++ b/testing/test_core.py
@@ -961,7 +961,7 @@
         """)
         p.copy(p.dirpath("skipping2.py"))
         monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
-        result = testdir.runpytest("-rw", "-p", "skipping1", "--traceconfig")
+        result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True)
         assert result.ret == 0
         result.stdout.fnmatch_lines([
             "WI1*skipped plugin*skipping1*hello*",
@@ -990,7 +990,7 @@
                 assert plugin is not None
         """)
         monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",")
-        result = testdir.runpytest(p)
+        result = testdir.runpytest(p, syspathinsert=True)
         assert result.ret == 0
         result.stdout.fnmatch_lines(["*1 passed*"])
 

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_doctest.py
--- a/testing/test_doctest.py
+++ b/testing/test_doctest.py
@@ -1,5 +1,5 @@
 from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
-import py, pytest
+import py
 
 class TestDoctests:
 
@@ -75,8 +75,6 @@
             assert isinstance(items[0].parent, DoctestModule)
             assert items[0].parent is items[1].parent
 
-    @pytest.mark.xfail('hasattr(sys, "pypy_version_info")', reason=
-                       "pypy leaks one FD")
     def test_simple_doctestfile(self, testdir):
         p = testdir.maketxtfile(test_doc="""
             >>> x = 1

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_genscript.py
--- a/testing/test_genscript.py
+++ b/testing/test_genscript.py
@@ -16,7 +16,6 @@
         assert self.script.check()
 
     def run(self, anypython, testdir, *args):
-        testdir.chdir()
         return testdir._run(anypython, self.script, *args)
 
 def test_gen(testdir, anypython, standalone):

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_helpconfig.py
--- a/testing/test_helpconfig.py
+++ b/testing/test_helpconfig.py
@@ -53,14 +53,14 @@
     ])
 
 def test_debug(testdir, monkeypatch):
-    result = testdir.runpytest("--debug")
+    result = testdir.runpytest_subprocess("--debug")
     assert result.ret == 0
     p = testdir.tmpdir.join("pytestdebug.log")
     assert "pytest_sessionstart" in p.read()
 
 def test_PYTEST_DEBUG(testdir, monkeypatch):
     monkeypatch.setenv("PYTEST_DEBUG", "1")
-    result = testdir.runpytest()
+    result = testdir.runpytest_subprocess()
     assert result.ret == 0
     result.stderr.fnmatch_lines([
         "*pytest_plugin_registered*",

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_nose.py
--- a/testing/test_nose.py
+++ b/testing/test_nose.py
@@ -19,9 +19,7 @@
         test_hello.teardown = lambda: l.append(2)
     """)
     result = testdir.runpytest(p, '-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
+    result.assert_outcomes(passed=2)
 
 
 def test_setup_func_with_setup_decorator():
@@ -66,9 +64,7 @@
 
     """)
     result = testdir.runpytest(p, '-p', 'nose')
-    result.stdout.fnmatch_lines([
-        "*2 passed*"
-    ])
+    result.assert_outcomes(passed=2)
 
 
 def test_nose_setup_func_failure(testdir):
@@ -302,7 +298,7 @@
                 pass
         """)
     result = testdir.runpytest()
-    result.stdout.fnmatch_lines("*1 passed*")
+    result.assert_outcomes(passed=1)
 
 @pytest.mark.skipif("sys.version_info < (2,6)")
 def test_setup_teardown_linking_issue265(testdir):
@@ -327,8 +323,8 @@
                 """Undoes the setup."""
                 raise Exception("should not call teardown for skipped tests")
         ''')
-    reprec = testdir.inline_run()
-    reprec.assertoutcome(passed=1, skipped=1)
+    reprec = testdir.runpytest()
+    reprec.assert_outcomes(passed=1, skipped=1)
 
 
 def test_SkipTest_during_collection(testdir):
@@ -339,7 +335,7 @@
             assert False
         """)
     result = testdir.runpytest(p)
-    result.assertoutcome(skipped=1)
+    result.assert_outcomes(skipped=1)
 
 
 def test_SkipTest_in_test(testdir):

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_pdb.py
--- a/testing/test_pdb.py
+++ b/testing/test_pdb.py
@@ -2,6 +2,13 @@
 import py
 import sys
 
+def runpdb_and_get_report(testdir, source):
+    p = testdir.makepyfile(source)
+    result = testdir.runpytest_inprocess("--pdb", p)
+    reports = result.reprec.getreports("pytest_runtest_logreport")
+    assert len(reports) == 3, reports # setup/call/teardown
+    return reports[1]
+
 
 class TestPDB:
     def pytest_funcarg__pdblist(self, request):
@@ -14,7 +21,7 @@
         return pdblist
 
     def test_pdb_on_fail(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             def test_func():
                 assert 0
         """)
@@ -24,7 +31,7 @@
         assert tb[-1].name == "test_func"
 
     def test_pdb_on_xfail(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             import pytest
             @pytest.mark.xfail
             def test_func():
@@ -34,7 +41,7 @@
         assert not pdblist
 
     def test_pdb_on_skip(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             import pytest
             def test_func():
                 pytest.skip("hello")
@@ -43,7 +50,7 @@
         assert len(pdblist) == 0
 
     def test_pdb_on_BdbQuit(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             import bdb
             def test_func():
                 raise bdb.BdbQuit
@@ -260,7 +267,7 @@
 
     def test_pdb_collection_failure_is_shown(self, testdir):
         p1 = testdir.makepyfile("""xxx """)
-        result = testdir.runpytest("--pdb", p1)
+        result = testdir.runpytest_subprocess("--pdb", p1)
         result.stdout.fnmatch_lines([
             "*NameError*xxx*",
             "*1 error*",

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_pytester.py
--- a/testing/test_pytester.py
+++ b/testing/test_pytester.py
@@ -69,9 +69,7 @@
             assert 1
     """)
     result = testdir.runpytest()
-    result.stdout.fnmatch_lines([
-        "*1 passed*"
-    ])
+    result.assert_outcomes(passed=1)
 
 
 def make_holder():
@@ -114,16 +112,6 @@
         unichr = chr
     testdir.makepyfile(unichr(0xfffd))
 
-def test_inprocess_plugins(testdir):
-    class Plugin(object):
-        configured = False
-        def pytest_configure(self, config):
-            self.configured = True
-    plugin = Plugin()
-    testdir.inprocess_run([], [plugin])
-
-    assert plugin.configured
-
 def test_inline_run_clean_modules(testdir):
     test_mod = testdir.makepyfile("def test_foo(): assert True")
     result = testdir.inline_run(str(test_mod))

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c testing/test_session.py
--- a/testing/test_session.py
+++ b/testing/test_session.py
@@ -203,7 +203,6 @@
 
 
 def test_plugin_specify(testdir):
-    testdir.chdir()
     pytest.raises(ImportError, """
             testdir.parseconfig("-p", "nqweotexistent")
     """)

diff -r a2dfd7c1fb40818cf8b61e17ebf30b0ed287918e -r 7d4a0b78d19b985ccca88827129d825151ad494c tox.ini
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
 distshare={homedir}/.tox/distshare
-envlist=flakes,py26,py27,py34,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py33,py27-xdist,py33-xdist,py27-trial,py33-trial,doctesting,py27-cxfreeze
+envlist=flakes,py26,py27,py34,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py33,py27-xdist,py33-xdist,{py27,py33}-trial,py27-subprocess,doctesting,py27-cxfreeze
 
 [testenv]
 changedir=testing
@@ -9,6 +9,15 @@
     nose
     mock
 
+[testenv:py27-subprocess]
+changedir=.
+basepython=python2.7
+deps=pytest-xdist
+    mock
+    nose
+commands=
+  py.test -n3 -rfsxX --runpytest=subprocess {posargs:testing}
+
 [testenv:genscript]
 changedir=.
 commands= py.test --genscript=pytest1
@@ -136,7 +145,7 @@
 minversion=2.0
 plugins=pytester
 #--pyargs --doctest-modules --ignore=.tox
-addopts= -rxsX 
+addopts= -rxsX -p pytester
 rsyncdirs=tox.ini pytest.py _pytest testing
 python_files=test_*.py *_test.py testing/*/*.py
 python_classes=Test Acceptance

Repository URL: https://bitbucket.org/pytest-dev/pytest/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


More information about the pytest-commit mailing list