[py-svn] pytest commit 8431fad888a4: streamline docs, especially use "import pytest" and "pytest.*" in python code examples instead of "import py" and "py.test.*".

commits-noreply@bitbucket.org
Thu Nov 18 14:58:31 CET 2010


# HG changeset patch -- Bitbucket.org
# Project pytest
# URL http://bitbucket.org/hpk42/pytest/overview
# User holger krekel <holger@merlinux.eu>
# Date 1290028336 -3600
# Node ID 8431fad888a4d0fd556456a89198f2d47722a8b9
# Parent  9810db0ef538c51439a9d359c4e6202ca6e04d17
streamline docs, especially use "import pytest" and "pytest.*" in python code examples instead of "import py" and "py.test.*".

--- a/testing/test_pastebin.py
+++ b/testing/test_pastebin.py
@@ -12,13 +12,13 @@ class TestPasting:
 
     def test_failed(self, testdir, pastebinlist):
         testpath = testdir.makepyfile("""
-            import py
+            import pytest
             def test_pass():
                 pass
             def test_fail():
                 assert 0
             def test_skip():
-                py.test.skip("")
+                pytest.skip("")
         """)
         reprec = testdir.inline_run(testpath, "--paste=failed")
         assert len(pastebinlist) == 1
@@ -29,13 +29,13 @@ class TestPasting:
 
     def test_all(self, testdir, pastebinlist):
         testpath = testdir.makepyfile("""
-            import py
+            import pytest
             def test_pass():
                 pass
             def test_fail():
                 assert 0
             def test_skip():
-                py.test.skip("")
+                pytest.skip("")
         """)
         reprec = testdir.inline_run(testpath, "--pastebin=all")
         assert reprec.countoutcomes() == [1,1,1]

--- a/doc/builtin.txt
+++ b/doc/builtin.txt
@@ -1,8 +1,8 @@
 
-py.test builtin helpers
+pytest builtin helpers
 ================================================
 
-builtin py.test.* helpers
+builtin pytest.* helpers
 -----------------------------------------------------
 
 You can always use an interactive Python prompt and type::
@@ -28,25 +28,25 @@ You can ask for available builtin or pro
         captures writes to sys.stdout/sys.stderr and makes
         them available successively via a ``capsys.readouterr()`` method
         which returns a ``(out, err)`` tuple of captured snapshot strings.
-        
+
     capfd
         captures writes to file descriptors 1 and 2 and makes
         snapshotted ``(out, err)`` string tuples available
         via the ``capfd.readouterr()`` method.  If the underlying
         platform does not have ``os.dup`` (e.g. Jython) tests using
         this funcarg will automatically skip.
-        
+
     tmpdir
         return a temporary directory path object
         unique to each test function invocation,
         created as a sub directory of the base temporary
         directory.  The returned object is a `py.path.local`_
         path object.
-        
+
     monkeypatch
         The returned ``monkeypatch`` funcarg provides these
         helper methods to modify objects, dictionaries or os.environ::
-        
+
         monkeypatch.setattr(obj, name, value, raising=True)
         monkeypatch.delattr(obj, name, raising=True)
         monkeypatch.setitem(mapping, name, value)
@@ -54,15 +54,15 @@ You can ask for available builtin or pro
         monkeypatch.setenv(name, value, prepend=False)
         monkeypatch.delenv(name, value, raising=True)
         monkeypatch.syspath_prepend(path)
-        
+
         All modifications will be undone when the requesting
         test function has finished its execution.  The ``raising``
         parameter determines if a KeyError or AttributeError
         will be raised if the set/deletion operation has no target.
-        
+
     recwarn
         Return a WarningsRecorder instance that provides these methods:
-        
+
         * ``pop(category=None)``: return last warning matching the category.
         * ``clear()``: clear list of warnings
-        
+

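The ``monkeypatch`` helpers listed above are easiest to see in a tiny
test.  A minimal sketch (the ``getssh`` function and the fake ``HOME``
value are invented for illustration)::

    import os

    def getssh(): # hypothetical function under test
        return os.path.join(os.environ["HOME"], ".ssh")

    def test_getssh(monkeypatch):
        # the modification is undone when the test function finishes
        monkeypatch.setenv("HOME", "/tmp/fakehome")
        assert getssh() == "/tmp/fakehome/.ssh"
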
--- a/testing/test_terminal.py
+++ b/testing/test_terminal.py
@@ -41,11 +41,11 @@ def pytest_generate_tests(metafunc):
 class TestTerminal:
     def test_pass_skip_fail(self, testdir, option):
         p = testdir.makepyfile("""
-            import py
+            import pytest
             def test_ok():
                 pass
             def test_skip():
-                py.test.skip("xx")
+                pytest.skip("xx")
             def test_func():
                 assert 0
         """)
@@ -69,7 +69,7 @@ class TestTerminal:
     def test_internalerror(self, testdir, linecomp):
         modcol = testdir.getmodulecol("def test_one(): pass")
         rep = TerminalReporter(modcol.config, file=linecomp.stringio)
-        excinfo = py.test.raises(ValueError, "raise ValueError('hello')")
+        excinfo = pytest.raises(ValueError, "raise ValueError('hello')")
         rep.pytest_internalerror(excinfo.getrepr())
         linecomp.assert_contains_lines([
             "INTERNALERROR> *ValueError*hello*"
@@ -136,7 +136,7 @@ class TestTerminal:
             def test_foobar():
                 assert 0
             def test_spamegg():
-                import py; py.test.skip('skip me please!')
+                import py; pytest.skip('skip me please!')
             def test_interrupt_me():
                 raise KeyboardInterrupt   # simulating the user
         """)
@@ -180,8 +180,8 @@ class TestCollectonly:
 
     def test_collectonly_skipped_module(self, testdir, linecomp):
         modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
-            import py
-            py.test.skip("nomod")
+            import pytest
+            pytest.skip("nomod")
         """)
         rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
         modcol.config.pluginmanager.register(rep)
@@ -335,13 +335,13 @@ class TestTerminalFunctional:
 
     def test_no_skip_summary_if_failure(self, testdir):
         testdir.makepyfile("""
-            import py
+            import pytest
             def test_ok():
                 pass
             def test_fail():
                 assert 0
             def test_skip():
-                py.test.skip("dontshow")
+                pytest.skip("dontshow")
         """)
         result = testdir.runpytest()
         assert result.stdout.str().find("skip test summary") == -1
@@ -397,14 +397,14 @@ class TestTerminalFunctional:
 
     def test_verbose_reporting(self, testdir, pytestconfig):
         p1 = testdir.makepyfile("""
-            import py
+            import pytest
             def test_fail():
                 raise ValueError()
             def test_pass():
                 pass
             class TestClass:
                 def test_skip(self):
-                    py.test.skip("hello")
+                    pytest.skip("hello")
             def test_gen():
                 def check(x):
                     assert x == 1
@@ -562,7 +562,7 @@ class TestGenericReporting:
 
     def test_tb_option(self, testdir, option):
         p = testdir.makepyfile("""
-            import py
+            import pytest
             def g():
                 raise IndexError
             def test_func():
@@ -587,7 +587,7 @@ class TestGenericReporting:
 
     def test_tb_crashline(self, testdir, option):
         p = testdir.makepyfile("""
-            import py
+            import pytest
             def g():
                 raise IndexError
             def test_func1():
@@ -620,7 +620,7 @@ def pytest_report_header(config):
             "*hello: info*",
         ])
 
-@py.test.mark.xfail("not hasattr(os, 'dup')")
+@pytest.mark.xfail("not hasattr(os, 'dup')")
 def test_fdopen_kept_alive_issue124(testdir):
     testdir.makepyfile("""
         import os, sys

--- a/doc/funcargs.txt
+++ b/doc/funcargs.txt
@@ -34,7 +34,7 @@ Running the test looks like this::
 
     $ py.test test_simplefactory.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_simplefactory.py
     
     test_simplefactory.py F
@@ -136,7 +136,7 @@ Running this::
 
     $ py.test test_example.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_example.py
     
     test_example.py .........F
@@ -151,7 +151,7 @@ Running this::
     E       assert 9 < 9
     
     test_example.py:7: AssertionError
-    ==================== 1 failed, 9 passed in 0.04 seconds ====================
+    ==================== 1 failed, 9 passed in 0.03 seconds ====================
 
 Note that the ``pytest_generate_tests(metafunc)`` hook is called during
 the test collection phase which is separate from the actual test running.
@@ -174,10 +174,10 @@ If you want to select only the run with 
 
     $ py.test -v -k 7 test_example.py  # or -k test_func[7]
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22 -- /home/hpk/venv/0/bin/python
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30 -- /home/hpk/venv/0/bin/python
     test path 1: test_example.py
     
-    test_example.py <- test_example.py:6: test_func[7] PASSED
+    test_example.py:6: test_func[7] PASSED
     
     ======================== 9 tests deselected by '7' =========================
     ================== 1 passed, 9 deselected in 0.01 seconds ==================

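The hunks above only show the output of ``test_example.py``; its source
is not part of this patch.  A plausible reconstruction from that output,
using the ``metafunc.addcall()`` parametrization API described in the
surrounding document::

    # content of test_example.py (reconstructed from the output above)
    def pytest_generate_tests(metafunc):
        if "numiter" in metafunc.funcargnames:
            for i in range(10):
                metafunc.addcall(funcargs=dict(numiter=i))

    def test_func(numiter):
        assert numiter < 9 # ten calls generated; the numiter == 9 call fails
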
--- a/doc/mark.txt
+++ b/doc/mark.txt
@@ -6,7 +6,7 @@ mark test functions with attributes
 
 .. currentmodule:: _pytest.mark
 
-By using the ``py.test.mark`` helper you can instantiate
+By using the ``pytest.mark`` helper you can instantiate
 decorators that will set named meta data on test functions.
 
 Marking a single function
@@ -14,22 +14,22 @@ Marking a single function
 
 You can "mark" a test function with meta data like this::
 
-    import py
-    @py.test.mark.webtest
+    import pytest
+    @pytest.mark.webtest
     def test_send_http():
         ...
 
-This will set the function attribute ``webtest`` to a :py:class:`MarkInfo` 
+This will set the function attribute ``webtest`` to a :py:class:`MarkInfo`
 instance.  You can also specify parametrized meta data like this::
 
     # content of test_mark.py
 
-    import py
-    @py.test.mark.webtest(firefox=30)
+    import pytest
+    @pytest.mark.webtest(firefox=30)
     def test_receive():
         pass
 
-    @py.test.mark.webtest("functional", firefox=30)
+    @pytest.mark.webtest("functional", firefox=30)
     def test_run_and_look():
         pass
 
@@ -43,12 +43,12 @@ and access it from other places like thi
 Marking whole classes or modules
 ----------------------------------------------------
 
-If you are programming with Python2.6 you may use ``py.test.mark`` decorators
+If you are programming with Python2.6 you may use ``pytest.mark`` decorators
 with classes to apply markers to all of their test methods::
 
     # content of test_mark_classlevel.py
-    import py
-    @py.test.mark.webtest
+    import pytest
+    @pytest.mark.webtest
     class TestClass:
         def test_startup(self):
             pass
@@ -61,22 +61,22 @@ two test functions.
 To remain compatible with Python2.5 you can also set a
 ``pytestmark`` attribute on a TestClass like this::
 
-    import py
+    import pytest
 
     class TestClass:
-        pytestmark = py.test.mark.webtest
+        pytestmark = pytest.mark.webtest
 
 or if you need to use multiple markers you can use a list::
 
-    import py
+    import pytest
 
     class TestClass:
-        pytestmark = [py.test.mark.webtest, pytest.mark.slowtest]
+        pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]
 
 You can also set a module level marker::
 
-    import py
-    pytestmark = py.test.mark.webtest
+    import pytest
+    pytestmark = pytest.mark.webtest
 
 in which case it will be applied to all functions and
 methods defined in the module.
@@ -88,20 +88,20 @@ You can use the ``-k`` command line opti
 
     $ py.test -k webtest  # running with the above defined examples yields
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
-    test path 1: /tmp/doc-exec-527
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
+    test path 1: /tmp/doc-exec-74
     
     test_mark.py ..
     test_mark_classlevel.py ..
     
-    ========================= 4 passed in 0.02 seconds =========================
+    ========================= 4 passed in 0.01 seconds =========================
 
 And you can also run all tests except the ones that match the keyword::
 
     $ py.test -k-webtest
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
-    test path 1: /tmp/doc-exec-527
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
+    test path 1: /tmp/doc-exec-74
     
     ===================== 4 tests deselected by '-webtest' =====================
     ======================= 4 deselected in 0.01 seconds =======================
@@ -110,8 +110,8 @@ Or to only select the class::
 
     $ py.test -kTestClass
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
-    test path 1: /tmp/doc-exec-527
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
+    test path 1: /tmp/doc-exec-74
     
     test_mark_classlevel.py ..
     

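The ``MarkInfo`` instance mentioned above records the positional and
keyword arguments handed to the marker.  A minimal sketch of reading
them back (assuming ``MarkInfo`` exposes ``args`` and ``kwargs``
attributes)::

    import pytest

    @pytest.mark.webtest("functional", firefox=30)
    def test_run_and_look():
        pass

    # the decorator stored its arguments on the function attribute
    marker = test_run_and_look.webtest
    assert marker.args == ("functional",)
    assert marker.kwargs == {"firefox": 30}
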
--- a/doc/unittest.txt
+++ b/doc/unittest.txt
@@ -24,7 +24,7 @@ Running it yields::
 
     $ py.test test_unittest.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_unittest.py
     
     test_unittest.py F
@@ -56,7 +56,7 @@ Running it yields::
     /usr/lib/python2.6/unittest.py:350: AssertionError
     ----------------------------- Captured stdout ------------------------------
     hello
-    ========================= 1 failed in 0.12 seconds =========================
+    ========================= 1 failed in 0.02 seconds =========================
 
 .. _`unittest.py style`: http://docs.python.org/library/unittest.html
 

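Only the session output of ``test_unittest.py`` appears in this hunk;
the module behind it plausibly looks like this (a reconstruction from
the captured stdout and the failure location)::

    # content of test_unittest.py (reconstructed)
    import unittest

    class MyTest(unittest.TestCase):
        def setUp(self):
            print("hello") # shows up as "Captured stdout" in the report

        def test_method(self):
            self.assertEquals("x", "y") # fails inside unittest.py
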
--- a/testing/test_core.py
+++ b/testing/test_core.py
@@ -1,4 +1,4 @@
-import py, os
+import pytest, py, os
 from _pytest.core import PluginManager, canonical_importname
 from _pytest.core import MultiCall, HookRelay, varnames
 
@@ -7,18 +7,18 @@ class TestBootstrapping:
     def test_consider_env_fails_to_import(self, monkeypatch):
         pluginmanager = PluginManager()
         monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",")
-        py.test.raises(ImportError, "pluginmanager.consider_env()")
+        pytest.raises(ImportError, "pluginmanager.consider_env()")
 
     def test_preparse_args(self):
         pluginmanager = PluginManager()
-        py.test.raises(ImportError, """
+        pytest.raises(ImportError, """
             pluginmanager.consider_preparse(["xyz", "-p", "hello123"])
         """)
 
     def test_plugin_skip(self, testdir, monkeypatch):
         p = testdir.makepyfile(pytest_skipping1="""
-            import py
-            py.test.skip("hello")
+            import pytest
+            pytest.skip("hello")
         """)
         p.copy(p.dirpath("pytest_skipping2.py"))
         monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
@@ -73,7 +73,7 @@ class TestBootstrapping:
     def test_pluginmanager_ENV_startup(self, testdir, monkeypatch):
         x500 = testdir.makepyfile(pytest_x500="#")
         p = testdir.makepyfile("""
-            import py
+            import pytest
             def test_hello(pytestconfig):
                 plugin = pytestconfig.pluginmanager.getplugin('x500')
                 assert plugin is not None
@@ -85,8 +85,8 @@ class TestBootstrapping:
 
     def test_import_plugin_importname(self, testdir):
         pluginmanager = PluginManager()
-        py.test.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")')
-        py.test.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwx.y")')
+        pytest.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")')
+        pytest.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwx.y")')
 
         reset = testdir.syspathinsert()
         pluginname = "pytest_hello"
@@ -103,8 +103,8 @@ class TestBootstrapping:
 
     def test_import_plugin_dotted_name(self, testdir):
         pluginmanager = PluginManager()
-        py.test.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")')
-        py.test.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwex.y")')
+        pytest.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")')
+        pytest.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwex.y")')
 
         reset = testdir.syspathinsert()
         testdir.mkpydir("pkg").join("plug.py").write("x=3")
@@ -148,7 +148,7 @@ class TestBootstrapping:
     def test_consider_conftest_deps(self, testdir):
         mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport()
         pp = PluginManager()
-        py.test.raises(ImportError, "pp.consider_conftest(mod)")
+        pytest.raises(ImportError, "pp.consider_conftest(mod)")
 
     def test_pm(self):
         pp = PluginManager()
@@ -187,10 +187,10 @@ class TestBootstrapping:
         assert pp.isregistered(mod)
         l = pp.getplugins()
         assert mod in l
-        py.test.raises(AssertionError, "pp.register(mod)")
+        pytest.raises(AssertionError, "pp.register(mod)")
         mod2 = py.std.types.ModuleType("pytest_hello")
         #pp.register(mod2) # double pm
-        py.test.raises(AssertionError, "pp.register(mod)")
+        pytest.raises(AssertionError, "pp.register(mod)")
         #assert not pp.isregistered(mod2)
         assert pp.getplugins() == l
 
@@ -208,14 +208,14 @@ class TestBootstrapping:
         class hello:
             def pytest_gurgel(self):
                 pass
-        py.test.raises(Exception, "pp.register(hello())")
+        pytest.raises(Exception, "pp.register(hello())")
 
     def test_register_mismatch_arg(self):
         pp = PluginManager(load=True)
         class hello:
             def pytest_configure(self, asd):
                 pass
-        excinfo = py.test.raises(Exception, "pp.register(hello())")
+        excinfo = pytest.raises(Exception, "pp.register(hello())")
 
     def test_canonical_importname(self):
         for name in 'xyz', 'pytest_xyz', 'pytest_Xyz', 'Xyz':
@@ -223,7 +223,7 @@ class TestBootstrapping:
 
     def test_notify_exception(self, capfd):
         pp = PluginManager()
-        excinfo = py.test.raises(ValueError, "raise ValueError(1)")
+        excinfo = pytest.raises(ValueError, "raise ValueError(1)")
         pp.notify_exception(excinfo)
         out, err = capfd.readouterr()
         assert "ValueError" in err
@@ -283,7 +283,7 @@ class TestBootstrapping:
         assert pm.trace.root.indent == indent
         assert len(l) == 1
         assert 'pytest_plugin_registered' in l[0]
-        py.test.raises(ValueError, lambda: pm.register(api1()))
+        pytest.raises(ValueError, lambda: pm.register(api1()))
         assert pm.trace.root.indent == indent
         assert saveindent[0] > indent
 
@@ -420,8 +420,8 @@ class TestPytestPluginInteractions:
 
 def test_namespace_has_default_and_env_plugins(testdir):
     p = testdir.makepyfile("""
-        import py
-        py.test.mark
+        import pytest
+        pytest.mark
     """)
     result = testdir.runpython(p)
     assert result.ret == 0
@@ -493,7 +493,7 @@ class TestMultiCall:
 
     def test_tags_call_error(self):
         multicall = MultiCall([lambda x: x], {})
-        py.test.raises(TypeError, "multicall.execute()")
+        pytest.raises(TypeError, "multicall.execute()")
 
     def test_call_subexecute(self):
         def m(__multicall__):
@@ -541,7 +541,7 @@ class TestHookRelay:
             def hello(self, arg):
                 "api hook 1"
         mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he")
-        py.test.raises(TypeError, "mcm.hello(3)")
+        pytest.raises(TypeError, "mcm.hello(3)")
 
     def test_firstresult_definition(self):
         pm = PluginManager()

--- a/doc/skipping.txt
+++ b/doc/skipping.txt
@@ -12,7 +12,7 @@ requirement without which considering or
 not make sense.  If a test fails under all conditions then it's
 probably best to mark your test as 'xfail'.
 
-By running ``py.test -rxs`` you will see extra reporting 
+By running ``py.test -rxs`` you will see extra reporting
 information on skips and xfail-run tests at the end of a test run.
 
 .. _skipif:
@@ -23,7 +23,7 @@ Skipping a single function
 Here is an example for marking a test function to be skipped
 when run on a Python3 interpreter::
 
-    @py.test.mark.skipif("sys.version_info >= (3,0)")
+    @pytest.mark.skipif("sys.version_info >= (3,0)")
     def test_function():
         ...
 
@@ -33,14 +33,14 @@ contains the  ``sys`` and ``os`` modules
 ``config`` object.  The latter allows you to skip based
 on a test configuration value e.g. like this::
 
-    @py.test.mark.skipif("not config.getvalue('db')")
+    @pytest.mark.skipif("not config.getvalue('db')")
     def test_function(...):
         ...
 
 Create a shortcut for your conditional skip decorator
 at module level like this::
 
-    win32only = py.test.mark.skipif("sys.platform != 'win32'")
+    win32only = pytest.mark.skipif("sys.platform != 'win32'")
 
     @win32only
     def test_function():
@@ -55,7 +55,7 @@ As with all function :ref:`marking` you 
 for skipping all methods of a test class based on platform::
 
     class TestPosixCalls:
-        pytestmark = py.test.mark.skipif("sys.platform == 'win32'")
+        pytestmark = pytest.mark.skipif("sys.platform == 'win32'")
 
         def test_function(self):
             # will not be setup or run under 'win32' platform
@@ -65,7 +65,7 @@ The ``pytestmark`` decorator will be app
 If your code targets python2.6 or above you can equivalently use
 the skipif decorator on classes::
 
-    @py.test.mark.skipif("sys.platform == 'win32'")
+    @pytest.mark.skipif("sys.platform == 'win32'")
     class TestPosixCalls:
 
         def test_function(self):
@@ -86,7 +86,7 @@ mark a test function as expected to fail
 You can use the ``xfail`` marker to indicate that you
 expect the test to fail::
 
-    @py.test.mark.xfail
+    @pytest.mark.xfail
     def test_function():
         ...
 
@@ -97,21 +97,21 @@ when it fails. Instead terminal reportin
 Same as with skipif_ you can also selectively expect a failure
 depending on platform::
 
-    @py.test.mark.xfail("sys.version_info >= (3,0)")
+    @pytest.mark.xfail("sys.version_info >= (3,0)")
     def test_function():
         ...
 
 To not run a test and still regard it as "xfailed"::
 
-    @py.test.mark.xfail(..., run=False)
+    @pytest.mark.xfail(..., run=False)
 
 To specify an explicit reason to be shown with xfailure detail::
 
-    @py.test.mark.xfail(..., reason="my reason")
+    @pytest.mark.xfail(..., reason="my reason")
 
 By specifying on the commandline::
 
-    py.test --runxfail
+    pytest --runxfail
 
 you can force the running and reporting of a runnable ``xfail`` marked test.
 
@@ -124,7 +124,7 @@ within test or setup code.  Example::
 
     def test_function():
         if not valid_config():
-            py.test.xfail("unsuppored configuration")
+            pytest.xfail("unsupported configuration")
 
 
 skipping on a missing import dependency
@@ -133,13 +133,13 @@ skipping on a missing import dependency
 You can use the following import helper at module level
 or within a test or test setup function::
 
-    docutils = py.test.importorskip("docutils")
+    docutils = pytest.importorskip("docutils")
 
 If ``docutils`` cannot be imported here, this will lead to a
 skip outcome of the test.  You can also skip depending on whether
 a library comes with a high enough version::
 
-    docutils = py.test.importorskip("docutils", minversion="0.3")
+    docutils = pytest.importorskip("docutils", minversion="0.3")
 
 The version will be read from the specified module's ``__version__`` attribute.
 
@@ -152,5 +152,5 @@ within test or setup code.  Example::
 
     def test_function():
         if not valid_config():
-            py.test.skip("unsuppored configuration")
+            pytest.skip("unsupported configuration")
 

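Putting the helpers from this file together, a test module guarding an
optional dependency might read (a sketch; ``docutils`` stands in for
any optional library)::

    import pytest

    # skip all tests in this module if docutils is missing or too old
    docutils = pytest.importorskip("docutils", minversion="0.3")

    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_version_attribute():
        # runs only when docutils >= 0.3 is importable and not on win32
        assert hasattr(docutils, "__version__")
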
--- a/doc/assert.txt
+++ b/doc/assert.txt
@@ -21,27 +21,27 @@ assertion fails you will see the value o
 
     $ py.test test_assert1.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_assert1.py
-    
+
     test_assert1.py F
-    
+
     ================================= FAILURES =================================
     ______________________________ test_function _______________________________
-    
+
         def test_function():
     >       assert f() == 4
     E       assert 3 == 4
     E        +  where 3 = f()
-    
+
     test_assert1.py:5: AssertionError
-    ========================= 1 failed in 0.05 seconds =========================
+    ========================= 1 failed in 0.03 seconds =========================
 
 Reporting details about the failing assertion is achieved by re-evaluating
 the assert expression and recording intermediate values.
 
 Note: If evaluating the assert expression has side effects you may get a
-warning that the intermediate values could not be determined safely.  A 
+warning that the intermediate values could not be determined safely.  A
 common example for this issue is reading from a file and comparing in one
 line::
 
@@ -57,14 +57,14 @@ assertions about expected exceptions
 ------------------------------------------
 
 In order to write assertions about raised exceptions, you can use
-``py.test.raises`` as a context manager like this::
+``pytest.raises`` as a context manager like this::
 
-    with py.test.raises(ZeroDivisionError):
+    with pytest.raises(ZeroDivisionError):
         1 / 0
 
 and if you need to have access to the actual exception info you may use::
 
-    with py.test.raises(RuntimeError) as excinfo:
+    with pytest.raises(RuntimeError) as excinfo:
         def f():
             f()
         f()
@@ -74,8 +74,8 @@ and if you need to have access to the ac
 If you want to write test code that works on Python2.4 as well,
 you may also use two other ways to test for an expected exception::
 
-    py.test.raises(ExpectedException, func, *args, **kwargs)
-    py.test.raises(ExpectedException, "func(*args, **kwargs)")
+    pytest.raises(ExpectedException, func, *args, **kwargs)
+    pytest.raises(ExpectedException, "func(*args, **kwargs)")
 
 both of which execute the specified function with args and kwargs and
 assert that the given ``ExpectedException`` is raised.  The reporter will
@@ -101,14 +101,14 @@ if you run this module::
 
     $ py.test test_assert2.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_assert2.py
-    
+
     test_assert2.py F
-    
+
     ================================= FAILURES =================================
     ___________________________ test_set_comparison ____________________________
-    
+
         def test_set_comparison():
             set1 = set("1308")
             set2 = set("8035")
@@ -118,7 +118,7 @@ if you run this module::
     E         '1'
     E         Extra items in the right set:
     E         '5'
-    
+
     test_assert2.py:5: AssertionError
     ========================= 1 failed in 0.02 seconds =========================
 
@@ -128,7 +128,7 @@ Special comparisons are done for a numbe
 * comparing long sequences: first failing indices
 * comparing dicts: different entries
 
-.. 
+..
     Defining your own comparison
     ----------------------------------------------
 

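The ``excinfo`` object from the context-manager form shown above can be
inspected once the block has exited; a minimal sketch continuing that
example::

    import pytest

    def test_recursion_depth():
        with pytest.raises(RuntimeError) as excinfo:
            def f():
                f()
            f()
        # excinfo wraps the raised exception; .value is the instance
        assert "maximum recursion" in str(excinfo.value)
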
--- a/doc/tmpdir.txt
+++ b/doc/tmpdir.txt
@@ -28,7 +28,7 @@ Running this would result in a passed te
 
     $ py.test test_tmpdir.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_tmpdir.py
     
     test_tmpdir.py F
@@ -36,7 +36,7 @@ Running this would result in a passed te
     ================================= FAILURES =================================
     _____________________________ test_create_file _____________________________
     
-    tmpdir = local('/tmp/pytest-447/test_create_file0')
+    tmpdir = local('/tmp/pytest-123/test_create_file0')
     
         def test_create_file(tmpdir):
             p = tmpdir.mkdir("sub").join("hello.txt")
@@ -47,7 +47,7 @@ Running this would result in a passed te
     E       assert 0
     
     test_tmpdir.py:7: AssertionError
-    ========================= 1 failed in 0.15 seconds =========================
+    ========================= 1 failed in 0.02 seconds =========================
 
 .. _`base temporary directory`:
 

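The failure shown above comes from a module along these lines (a
reconstruction; the traceback in the output quotes its statements)::

    # content of test_tmpdir.py (reconstructed from the output above)
    def test_create_file(tmpdir):
        p = tmpdir.mkdir("sub").join("hello.txt")
        p.write("content")
        assert p.read() == "content"
        assert len(tmpdir.listdir()) == 1
        assert 0 # deliberate failure, to show the tmpdir path in the report
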
--- a/testing/test_assertion.py
+++ b/testing/test_assertion.py
@@ -1,9 +1,9 @@
 import sys
 
-import py
+import py, pytest
 import _pytest.assertion as plugin
 
-needsnewassert = py.test.mark.skipif("sys.version_info < (2,6)")
+needsnewassert = pytest.mark.skipif("sys.version_info < (2,6)")
 
 def interpret(expr):
     return py.code._reinterpret(expr, py.code.Frame(sys._getframe(1)))

--- a/testing/test_session.py
+++ b/testing/test_session.py
@@ -45,9 +45,9 @@ class SessionTests:
 
     def test_raises_output(self, testdir):
         reprec = testdir.inline_runsource("""
-            import py
+            import pytest
             def test_raises_doesnt():
-                py.test.raises(ValueError, int, "3")
+                pytest.raises(ValueError, int, "3")
         """)
         passed, skipped, failed = reprec.listoutcomes()
         assert len(failed) == 1
@@ -118,15 +118,15 @@ class SessionTests:
 
     def test_skip_file_by_conftest(self, testdir):
         testdir.makepyfile(conftest="""
-            import py
+            import pytest
             def pytest_collect_file():
-                py.test.skip("intentional")
+                pytest.skip("intentional")
         """, test_file="""
             def test_one(): pass
         """)
         try:
             reprec = testdir.inline_run(testdir.tmpdir)
-        except py.test.skip.Exception:
+        except pytest.skip.Exception:
             py.test.fail("wrong skipped caught")
         reports = reprec.getreports("pytest_collectreport")
         assert len(reports) == 1
@@ -173,8 +173,8 @@ class TestNewSession(SessionTests):
                     pass
             """,
             test_two="""
-                import py
-                py.test.skip('xxx')
+                import pytest
+                pytest.skip('xxx')
             """,
             test_three="xxxdsadsadsadsa",
             __init__=""
@@ -204,10 +204,10 @@ class TestNewSession(SessionTests):
 
 def test_plugin_specify(testdir):
     testdir.chdir()
-    config = py.test.raises(ImportError, """
+    config = pytest.raises(ImportError, """
             testdir.parseconfig("-p", "nqweotexistent")
     """)
-    #py.test.raises(ImportError,
+    #pytest.raises(ImportError,
     #    "config.pluginmanager.do_configure(config)"
     #)
 

--- a/testing/test_pytester.py
+++ b/testing/test_pytester.py
@@ -1,4 +1,4 @@
-import py
+import pytest
 import os, sys
 from _pytest.pytester import LineMatcher, LineComp, HookRecorder
 from _pytest.core import PluginManager
@@ -8,7 +8,7 @@ def test_reportrecorder(testdir):
     recorder = testdir.getreportrecorder(item.config)
     assert not recorder.getfailures()
 
-    py.test.xfail("internal reportrecorder tests need refactoring")
+    pytest.xfail("internal reportrecorder tests need refactoring")
     class rep:
         excinfo = None
         passed = False
@@ -51,10 +51,11 @@ def test_reportrecorder(testdir):
     recorder.unregister()
     recorder.clear()
     recorder.hook.pytest_runtest_logreport(report=rep)
-    py.test.raises(ValueError, "recorder.getfailures()")
+    pytest.raises(ValueError, "recorder.getfailures()")
 
 
 def test_parseconfig(testdir):
+    import py
     config1 = testdir.parseconfig()
     config2 = testdir.parseconfig()
     assert config2 != config1
@@ -81,7 +82,7 @@ def test_hookrecorder_basic():
     call = rec.popcall("pytest_xyz")
     assert call.arg == 123
     assert call._name == "pytest_xyz"
-    py.test.raises(ValueError, "rec.popcall('abc')")
+    pytest.raises(ValueError, "rec.popcall('abc')")
 
 def test_hookrecorder_basic_no_args_hook():
     rec = HookRecorder(PluginManager())
@@ -96,7 +97,7 @@ def test_hookrecorder_basic_no_args_hook
 
 def test_functional(testdir, linecomp):
     reprec = testdir.inline_runsource("""
-        import py
+        import pytest
         from _pytest.core import HookRelay, PluginManager
         pytest_plugins="pytester"
         def test_func(_pytest):

--- a/doc/doctest.txt
+++ b/doc/doctest.txt
@@ -44,7 +44,7 @@ then you can just invoke ``py.test`` wit
 
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
-    test path 1: /tmp/doc-exec-519
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
+    test path 1: /tmp/doc-exec-66
     
     =============================  in 0.00 seconds =============================

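For reference, a doctest file of the kind collected here looks like
this (a minimal sketch, assuming the doctest plugin's default
``test*.txt`` collection pattern)::

    # content of test_example.txt

    hello this is a doctest
    >>> x = 3
    >>> x
    3
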
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -1,4 +1,4 @@
-import py
+import pytest
 
 from _pytest.skipping import MarkEvaluator, folded_skips
 from _pytest.skipping import pytest_runtest_setup
@@ -13,8 +13,8 @@ class TestEvaluator:
 
     def test_marked_no_args(self, testdir):
         item = testdir.getitem("""
-            import py
-            @py.test.mark.xyz
+            import pytest
+            @pytest.mark.xyz
             def test_func():
                 pass
         """)
@@ -27,8 +27,8 @@ class TestEvaluator:
 
     def test_marked_one_arg(self, testdir):
         item = testdir.getitem("""
-            import py
-            @py.test.mark.xyz("hasattr(os, 'sep')")
+            import pytest
+            @pytest.mark.xyz("hasattr(os, 'sep')")
             def test_func():
                 pass
         """)
@@ -40,8 +40,8 @@ class TestEvaluator:
 
     def test_marked_one_arg_with_reason(self, testdir):
         item = testdir.getitem("""
-            import py
-            @py.test.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
+            import pytest
+            @pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
             def test_func():
                 pass
         """)
@@ -54,12 +54,12 @@ class TestEvaluator:
 
     def test_marked_one_arg_twice(self, testdir):
         lines = [
-            '''@py.test.mark.skipif("not hasattr(os, 'murks')")''',
-            '''@py.test.mark.skipif("hasattr(os, 'murks')")'''
+            '''@pytest.mark.skipif("not hasattr(os, 'murks')")''',
+            '''@pytest.mark.skipif("hasattr(os, 'murks')")'''
         ]
         for i in range(0, 2):
             item = testdir.getitem("""
-                import py
+                import pytest
                 %s
                 %s
                 def test_func():
@@ -73,9 +73,9 @@ class TestEvaluator:
 
     def test_marked_one_arg_twice2(self, testdir):
         item = testdir.getitem("""
-            import py
-            @py.test.mark.skipif("hasattr(os, 'murks')")
-            @py.test.mark.skipif("not hasattr(os, 'murks')")
+            import pytest
+            @pytest.mark.skipif("hasattr(os, 'murks')")
+            @pytest.mark.skipif("not hasattr(os, 'murks')")
             def test_func():
                 pass
         """)
@@ -87,9 +87,9 @@ class TestEvaluator:
 
     def test_skipif_class(self, testdir):
         item, = testdir.getitems("""
-            import py
+            import pytest
             class TestClass:
-                pytestmark = py.test.mark.skipif("config._hackxyz")
+                pytestmark = pytest.mark.skipif("config._hackxyz")
                 def test_func(self):
                     pass
         """)
@@ -103,8 +103,8 @@ class TestEvaluator:
 class TestXFail:
     def test_xfail_simple(self, testdir):
         item = testdir.getitem("""
-            import py
-            @py.test.mark.xfail
+            import pytest
+            @pytest.mark.xfail
             def test_func():
                 assert 0
         """)
@@ -117,8 +117,8 @@ class TestXFail:
 
     def test_xfail_xpassed(self, testdir):
         item = testdir.getitem("""
-            import py
-            @py.test.mark.xfail
+            import pytest
+            @pytest.mark.xfail
             def test_func():
                 assert 1
         """)
@@ -131,8 +131,8 @@ class TestXFail:
 
     def test_xfail_run_anyway(self, testdir):
         testdir.makepyfile("""
-            import py
-            @py.test.mark.xfail
+            import pytest
+            @pytest.mark.xfail
             def test_func():
                 assert 0
         """)
@@ -146,8 +146,8 @@ class TestXFail:
 
     def test_xfail_evalfalse_but_fails(self, testdir):
         item = testdir.getitem("""
-            import py
-            @py.test.mark.xfail('False')
+            import pytest
+            @pytest.mark.xfail('False')
             def test_func():
                 assert 0
         """)
@@ -158,8 +158,8 @@ class TestXFail:
 
     def test_xfail_not_report_default(self, testdir):
         p = testdir.makepyfile(test_one="""
-            import py
-            @py.test.mark.xfail
+            import pytest
+            @pytest.mark.xfail
             def test_this():
                 assert 0
         """)
@@ -170,14 +170,14 @@ class TestXFail:
 
     def test_xfail_not_run_xfail_reporting(self, testdir):
         p = testdir.makepyfile(test_one="""
-            import py
-            @py.test.mark.xfail(run=False, reason="noway")
+            import pytest
+            @pytest.mark.xfail(run=False, reason="noway")
             def test_this():
                 assert 0
-            @py.test.mark.xfail("True", run=False)
+            @pytest.mark.xfail("True", run=False)
             def test_this_true():
                 assert 0
-            @py.test.mark.xfail("False", run=False, reason="huh")
+            @pytest.mark.xfail("False", run=False, reason="huh")
             def test_this_false():
                 assert 1
         """)
@@ -192,8 +192,8 @@ class TestXFail:
 
     def test_xfail_not_run_no_setup_run(self, testdir):
         p = testdir.makepyfile(test_one="""
-            import py
-            @py.test.mark.xfail(run=False, reason="hello")
+            import pytest
+            @pytest.mark.xfail(run=False, reason="hello")
             def test_this():
                 assert 0
             def setup_module(mod):
@@ -208,8 +208,8 @@ class TestXFail:
 
     def test_xfail_xpass(self, testdir):
         p = testdir.makepyfile(test_one="""
-            import py
-            @py.test.mark.xfail
+            import pytest
+            @pytest.mark.xfail
             def test_that():
                 assert 1
         """)
@@ -222,9 +222,9 @@ class TestXFail:
 
     def test_xfail_imperative(self, testdir):
         p = testdir.makepyfile("""
-            import py
+            import pytest
             def test_this():
-                py.test.xfail("hello")
+                pytest.xfail("hello")
         """)
         result = testdir.runpytest(p)
         result.stdout.fnmatch_lines([
@@ -238,14 +238,14 @@ class TestXFail:
         result = testdir.runpytest(p, "--runxfail")
         result.stdout.fnmatch_lines([
             "*def test_this():*",
-            "*py.test.xfail*",
+            "*pytest.xfail*",
         ])
 
     def test_xfail_imperative_in_setup_function(self, testdir):
         p = testdir.makepyfile("""
-            import py
+            import pytest
             def setup_function(function):
-                py.test.xfail("hello")
+                pytest.xfail("hello")
 
             def test_this():
                 assert 0
@@ -262,14 +262,14 @@ class TestXFail:
         result = testdir.runpytest(p, "--runxfail")
         result.stdout.fnmatch_lines([
             "*def setup_function(function):*",
-            "*py.test.xfail*",
+            "*pytest.xfail*",
         ])
 
     def xtest_dynamic_xfail_set_during_setup(self, testdir):
         p = testdir.makepyfile("""
-            import py
+            import pytest
             def setup_function(function):
-                py.test.mark.xfail(function)
+                pytest.mark.xfail(function)
             def test_this():
                 assert 0
             def test_that():
@@ -283,9 +283,9 @@ class TestXFail:
 
     def test_dynamic_xfail_no_run(self, testdir):
         p = testdir.makepyfile("""
-            import py
+            import pytest
             def pytest_funcarg__arg(request):
-                request.applymarker(py.test.mark.xfail(run=False))
+                request.applymarker(pytest.mark.xfail(run=False))
             def test_this(arg):
                 assert 0
         """)
@@ -297,9 +297,9 @@ class TestXFail:
 
     def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
         p = testdir.makepyfile("""
-            import py
+            import pytest
             def pytest_funcarg__arg(request):
-                request.applymarker(py.test.mark.xfail)
+                request.applymarker(pytest.mark.xfail)
             def test_this2(arg):
                 assert 0
         """)
@@ -312,19 +312,19 @@ class TestXFail:
 class TestSkipif:
     def test_skipif_conditional(self, testdir):
         item = testdir.getitem("""
-            import py
-            @py.test.mark.skipif("hasattr(os, 'sep')")
+            import pytest
+            @pytest.mark.skipif("hasattr(os, 'sep')")
             def test_func():
                 pass
         """)
-        x = py.test.raises(py.test.skip.Exception, "pytest_runtest_setup(item)")
+        x = pytest.raises(pytest.skip.Exception, "pytest_runtest_setup(item)")
         assert x.value.msg == "condition: hasattr(os, 'sep')"
 
 
     def test_skipif_reporting(self, testdir):
         p = testdir.makepyfile("""
-            import py
-            @py.test.mark.skipif("hasattr(sys, 'platform')")
+            import pytest
+            @pytest.mark.skipif("hasattr(sys, 'platform')")
             def test_that():
                 assert 0
         """)
@@ -337,9 +337,9 @@ class TestSkipif:
 
 def test_skip_not_report_default(testdir):
     p = testdir.makepyfile(test_one="""
-        import py
+        import pytest
         def test_this():
-            py.test.skip("hello")
+            pytest.skip("hello")
     """)
     result = testdir.runpytest(p, '-v')
     result.stdout.fnmatch_lines([
@@ -350,10 +350,10 @@ def test_skip_not_report_default(testdir
 
 def test_skipif_class(testdir):
     p = testdir.makepyfile("""
-        import py
+        import pytest
 
         class TestClass:
-            pytestmark = py.test.mark.skipif("True")
+            pytestmark = pytest.mark.skipif("True")
             def test_that(self):
                 assert 0
             def test_though(self):
@@ -407,9 +407,9 @@ def test_skipped_reasons_functional(test
             doskip()
        """,
        conftest = """
-            import py
+            import pytest
             def doskip():
-                py.test.skip('test')
+                pytest.skip('test')
         """
     )
     result = testdir.runpytest('--report=skipped')
@@ -422,17 +422,17 @@ def test_skipped_reasons_functional(test
 
 def test_reportchars(testdir):
     testdir.makepyfile("""
-        import py
+        import pytest
         def test_1():
             assert 0
-        @py.test.mark.xfail
+        @pytest.mark.xfail
         def test_2():
             assert 0
-        @py.test.mark.xfail
+        @pytest.mark.xfail
         def test_3():
             pass
         def test_4():
-            py.test.skip("four")
+            pytest.skip("four")
     """)
     result = testdir.runpytest("-rfxXs")
     result.stdout.fnmatch_lines([

--- a/testing/test_junitxml.py
+++ b/testing/test_junitxml.py
@@ -20,17 +20,17 @@ def assert_attr(node, **kwargs):
 class TestPython:
     def test_summing_simple(self, testdir):
         testdir.makepyfile("""
-            import py
+            import pytest
             def test_pass():
                 pass
             def test_fail():
                 assert 0
             def test_skip():
-                py.test.skip("")
-            @py.test.mark.xfail
+                pytest.skip("")
+            @pytest.mark.xfail
             def test_xfail():
                 assert 0
-            @py.test.mark.xfail
+            @pytest.mark.xfail
             def test_xpass():
                 assert 1
         """)
@@ -157,9 +157,9 @@ class TestPython:
 
     def test_xfailure_function(self, testdir):
         testdir.makepyfile("""
-            import py
+            import pytest
             def test_xfail():
-                py.test.xfail("42")
+                pytest.xfail("42")
         """)
         result, dom = runandparse(testdir)
         assert not result.ret
@@ -175,8 +175,8 @@ class TestPython:
 
     def test_xfailure_xpass(self, testdir):
         testdir.makepyfile("""
-            import py
-            @py.test.mark.xfail
+            import pytest
+            @pytest.mark.xfail
             def test_xpass():
                 pass
         """)
@@ -207,7 +207,7 @@ class TestPython:
         assert "SyntaxError" in fnode.toxml()
 
     def test_collect_skipped(self, testdir):
-        testdir.makepyfile("import py ; py.test.skip('xyz')")
+        testdir.makepyfile("import pytest; pytest.skip('xyz')")
         result, dom = runandparse(testdir)
         assert not result.ret
         node = dom.getElementsByTagName("testsuite")[0]

--- a/testing/test_resultlog.py
+++ b/testing/test_resultlog.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
 import os
 from _pytest.resultlog import generic_path, ResultLog, \
         pytest_configure, pytest_unconfigure
@@ -79,7 +79,8 @@ class TestWithFunctionIntegration:
 
     def test_collection_report(self, testdir):
         ok = testdir.makepyfile(test_collection_ok="")
-        skip = testdir.makepyfile(test_collection_skip="import py ; py.test.skip('hello')")
+        skip = testdir.makepyfile(test_collection_skip=
+            "import pytest ; pytest.skip('hello')")
         fail = testdir.makepyfile(test_collection_fail="XXX")
         lines = self.getresultlog(testdir, ok)
         assert not lines
@@ -101,14 +102,14 @@ class TestWithFunctionIntegration:
 
     def test_log_test_outcomes(self, testdir):
         mod = testdir.makepyfile(test_mod="""
-            import py
+            import pytest
             def test_pass(): pass
-            def test_skip(): py.test.skip("hello")
+            def test_skip(): pytest.skip("hello")
             def test_fail(): raise ValueError("FAIL")
 
-            @py.test.mark.xfail
+            @pytest.mark.xfail
             def test_xfail(): raise ValueError("XFAIL")
-            @py.test.mark.xfail
+            @pytest.mark.xfail
             def test_xpass(): pass
 
         """)
@@ -152,17 +153,17 @@ class TestWithFunctionIntegration:
 def test_generic(testdir, LineMatcher):
     testdir.plugins.append("resultlog")
     testdir.makepyfile("""
-        import py
+        import pytest
         def test_pass():
             pass
         def test_fail():
             assert 0
         def test_skip():
-            py.test.skip("")
-        @py.test.mark.xfail
+            pytest.skip("")
+        @pytest.mark.xfail
         def test_xfail():
             assert 0
-        @py.test.mark.xfail(run=False)
+        @pytest.mark.xfail(run=False)
         def test_xfail_norun():
             assert 0
     """)

--- a/doc/example/mysetup.txt
+++ b/doc/example/mysetup.txt
@@ -49,7 +49,7 @@ You can now run the test::
 
     $ py.test test_sample.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_sample.py
     
     test_sample.py F
@@ -57,7 +57,7 @@ You can now run the test::
     ================================= FAILURES =================================
     _______________________________ test_answer ________________________________
     
-    mysetup = <conftest.MySetup instance at 0x1ca5cf8>
+    mysetup = <conftest.MySetup instance at 0x16f5998>
     
         def test_answer(mysetup):
             app = mysetup.myapp()
@@ -84,7 +84,7 @@ the previous example to add a command li
 and to offer a new mysetup method::
 
     # content of ./conftest.py
-    import py
+    import pytest
     from myapp import MyApp
 
     def pytest_funcarg__mysetup(request): # "mysetup" factory function
@@ -105,7 +105,7 @@ and to offer a new mysetup method::
         def getsshconnection(self):
             host = self.config.option.ssh
             if host is None:
-                py.test.skip("specify ssh host with --ssh")
+                pytest.skip("specify ssh host with --ssh")
             return execnet.SshGateway(host)
 
 
@@ -122,14 +122,14 @@ Running it yields::
 
     $ py.test test_ssh.py -rs
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_ssh.py
     
     test_ssh.py s
     ========================= short test summary info ==========================
-    SKIP [1] /tmp/doc-exec-560/conftest.py:22: specify ssh host with --ssh
+    SKIP [1] /tmp/doc-exec-107/conftest.py:22: specify ssh host with --ssh
     
-    ======================== 1 skipped in 0.03 seconds =========================
+    ======================== 1 skipped in 0.02 seconds =========================
 
 If you specify a command line option like ``py.test --ssh=python.org`` the test will execute as expected.
 

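The ``conftest.py`` fragment in the hunk above belongs to a larger
file; a reconstruction of the parts not shown (the ``--ssh`` option
wiring is inferred from the skip message)::

    # content of ./conftest.py (reconstructed around the lines above)
    import pytest
    from myapp import MyApp

    def pytest_addoption(parser):
        parser.addoption("--ssh", action="store", default=None,
                         help="specify ssh host to run tests with")

    def pytest_funcarg__mysetup(request): # "mysetup" factory function
        return MySetup(request)

    class MySetup:
        def __init__(self, request):
            self.config = request.config

        def myapp(self):
            return MyApp()

        def getsshconnection(self):
            import execnet
            host = self.config.option.ssh
            if host is None:
                pytest.skip("specify ssh host with --ssh")
            return execnet.SshGateway(host)
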
--- a/testing/test_conftest.py
+++ b/testing/test_conftest.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
 from _pytest.config import Conftest
 
 def pytest_generate_tests(metafunc):
@@ -56,8 +56,8 @@ class TestConftestValueAccessGlobal:
 
     def test_value_access_not_existing(self, basedir):
         conftest = ConftestWithSetinitial(basedir)
-        py.test.raises(KeyError, "conftest.rget('a')")
-        #py.test.raises(KeyError, "conftest.lget('a')")
+        pytest.raises(KeyError, "conftest.rget('a')")
+        #pytest.raises(KeyError, "conftest.lget('a')")
 
     def test_value_access_by_path(self, basedir):
         conftest = ConftestWithSetinitial(basedir)
@@ -66,7 +66,7 @@ class TestConftestValueAccessGlobal:
         assert conftest.rget("a", basedir.join('adir', 'b')) == 1.5
         #assert conftest.lget("a", basedir.join('adir', 'b')) == 1
         #assert conftest.lget("b", basedir.join('adir', 'b')) == 2
-        #assert py.test.raises(KeyError,
+        #assert pytest.raises(KeyError,
         #    'conftest.lget("b", basedir.join("a"))'
         #)
 
@@ -109,7 +109,7 @@ def test_doubledash_not_considered(testd
 def test_conftest_global_import(testdir):
     testdir.makeconftest("x=3")
     p = testdir.makepyfile("""
-        import py
+        import py, pytest
         from _pytest.config import Conftest
         conf = Conftest()
         mod = conf.importconftest(py.path.local("conftest.py"))
@@ -185,7 +185,7 @@ def test_conftest_samecontent_detection(
     assert len(l) == 1
     assert l[0].__file__ == p.join("conftest.py")
 
-@py.test.mark.multi(name='test tests whatever .dotdir'.split())
+@pytest.mark.multi(name='test tests whatever .dotdir'.split())
 def test_setinitial_conftest_subdirs(testdir, name):
     sub = testdir.mkdir(name)
     subconftest = sub.ensure("conftest.py")

--- a/testing/test_parseopt.py
+++ b/testing/test_parseopt.py
@@ -1,11 +1,11 @@
-import py
+import py, pytest
 from _pytest import config as parseopt
 from textwrap import dedent
 
 class TestParser:
     def test_no_help_by_default(self, capsys):
         parser = parseopt.Parser(usage="xyz")
-        py.test.raises(SystemExit, 'parser.parse(["-h"])')
+        pytest.raises(SystemExit, 'parser.parse(["-h"])')
         out, err = capsys.readouterr()
         assert err.find("no such option") != -1
 
@@ -41,7 +41,7 @@ class TestParser:
     def test_group_shortopt_lowercase(self):
         parser = parseopt.Parser()
         group = parser.getgroup("hello")
-        py.test.raises(ValueError, """
+        pytest.raises(ValueError, """
             group.addoption("-x", action="store_true")
         """)
         assert len(group.options) == 0
@@ -102,7 +102,7 @@ class TestParser:
         assert option.this == 42
 
 
-@py.test.mark.skipif("sys.version_info < (2,5)")
+@pytest.mark.skipif("sys.version_info < (2,5)")
 def test_addoption_parser_epilog(testdir):
     testdir.makeconftest("""
         def pytest_addoption(parser):

--- a/testing/test_runner_xunit.py
+++ b/testing/test_runner_xunit.py
@@ -152,9 +152,9 @@ def test_failing_setup_calls_teardown(te
 
 def test_setup_that_skips_calledagain_and_teardown(testdir):
     p = testdir.makepyfile("""
-        import py
+        import pytest
         def setup_module(mod):
-            py.test.skip("x")
+            pytest.skip("x")
         def test_function1():
             pass
         def test_function2():
@@ -170,7 +170,7 @@ def test_setup_that_skips_calledagain_an
 
 def test_setup_fails_again_on_all_tests(testdir):
     p = testdir.makepyfile("""
-        import py
+        import pytest
         def setup_module(mod):
             raise ValueError(42)
         def test_function1():
@@ -188,7 +188,7 @@ def test_setup_fails_again_on_all_tests(
 
 def test_setup_funcarg_setup_not_called_if_outer_scope_fails(testdir):
     p = testdir.makepyfile("""
-        import py
+        import pytest
         def setup_module(mod):
             raise ValueError(42)
         def pytest_funcarg__hello(request):

--- a/testing/test_runner.py
+++ b/testing/test_runner.py
@@ -1,4 +1,4 @@
-import py, sys
+import pytest, py, sys
 from _pytest import runner
 from py._code.code import ReprExceptionInfo
 
@@ -41,8 +41,8 @@ class TestSetupState:
             def test_func(): pass
         """)
         ss = runner.SetupState()
-        py.test.raises(ValueError, "ss.prepare(item)")
-        py.test.raises(ValueError, "ss.prepare(item)")
+        pytest.raises(ValueError, "ss.prepare(item)")
+        pytest.raises(ValueError, "ss.prepare(item)")
 
 class BaseFunctionalTests:
     def test_passfunction(self, testdir):
@@ -71,9 +71,9 @@ class BaseFunctionalTests:
 
     def test_skipfunction(self, testdir):
         reports = testdir.runitem("""
-            import py
+            import pytest
             def test_func():
-                py.test.skip("hello")
+                pytest.skip("hello")
         """)
         rep = reports[1]
         assert not rep.failed
@@ -89,9 +89,9 @@ class BaseFunctionalTests:
 
     def test_skip_in_setup_function(self, testdir):
         reports = testdir.runitem("""
-            import py
+            import pytest
             def setup_function(func):
-                py.test.skip("hello")
+                pytest.skip("hello")
             def test_func():
                 pass
         """)
@@ -108,7 +108,7 @@ class BaseFunctionalTests:
 
     def test_failure_in_setup_function(self, testdir):
         reports = testdir.runitem("""
-            import py
+            import pytest
             def setup_function(func):
                 raise ValueError(42)
             def test_func():
@@ -123,7 +123,7 @@ class BaseFunctionalTests:
 
     def test_failure_in_teardown_function(self, testdir):
         reports = testdir.runitem("""
-            import py
+            import pytest
             def teardown_function(func):
                 raise ValueError(42)
             def test_func():
@@ -147,7 +147,7 @@ class BaseFunctionalTests:
                     return "hello"
         """)
         reports = testdir.runitem("""
-            import py
+            import pytest
             def test_func():
                 assert 0
         """)
@@ -226,7 +226,7 @@ class TestExecutionNonForked(BaseFunctio
             py.test.fail("did not raise")
 
 class TestExecutionForked(BaseFunctionalTests):
-    pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')")
+    pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')")
 
     def getrunner(self):
         # XXX re-arrange this test to live in pytest-xdist
@@ -289,7 +289,7 @@ def test_callinfo():
 
 # design question: do we want general hooks in python files?
 # then something like the following functional tests makes sense
-@py.test.mark.xfail
+@pytest.mark.xfail
 def test_runtest_in_module_ordering(testdir):
     p1 = testdir.makepyfile("""
         def pytest_runtest_setup(item): # runs after class-level!
@@ -336,8 +336,8 @@ def test_pytest_fail():
 
 def test_exception_printing_skip():
     try:
-        py.test.skip("hello")
-    except py.test.skip.Exception:
+        pytest.skip("hello")
+    except pytest.skip.Exception:
         excinfo = py.code.ExceptionInfo()
         s = excinfo.exconly(tryshort=True)
         assert s.startswith("Skipped")
@@ -351,20 +351,20 @@ def test_importorskip():
         assert sys == py.std.sys
         #path = py.test.importorskip("os.path")
         #assert path == py.std.os.path
-        excinfo = py.test.raises(py.test.skip.Exception, f)
+        excinfo = pytest.raises(pytest.skip.Exception, f)
         path = py.path.local(excinfo.getrepr().reprcrash.path)
         # check that importorskip reports the actual call
         # in this test the test_runner.py file
         assert path.purebasename == "test_runner"
-        py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
-        py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
+        pytest.raises(SyntaxError, "py.test.importorskip('x y z')")
+        pytest.raises(SyntaxError, "py.test.importorskip('x=y')")
         path = importorskip("py", minversion=".".join(py.__version__))
         mod = py.std.types.ModuleType("hello123")
         mod.__version__ = "1.3"
-        py.test.raises(py.test.skip.Exception, """
+        pytest.raises(pytest.skip.Exception, """
             py.test.importorskip("hello123", minversion="5.0")
         """)
-    except py.test.skip.Exception:
+    except pytest.skip.Exception:
         print(py.code.ExceptionInfo())
         py.test.fail("spurious skip")
 

--- a/testing/test_unittest.py
+++ b/testing/test_unittest.py
@@ -1,4 +1,4 @@
-import py
+import pytest
 
 def test_simple_unittest(testdir):
     testpath = testdir.makepyfile("""
@@ -73,8 +73,8 @@ def test_teardown(testdir):
 def test_module_level_pytestmark(testdir):
     testpath = testdir.makepyfile("""
         import unittest
-        import py
-        pytestmark = py.test.mark.xfail
+        import pytest
+        pytestmark = pytest.mark.xfail
         class MyTestCase(unittest.TestCase):
             def test_func1(self):
                 assert 0
@@ -85,7 +85,7 @@ def test_module_level_pytestmark(testdir
 def test_class_setup(testdir):
     testpath = testdir.makepyfile("""
         import unittest
-        import py
+        import pytest
         class MyTestCase(unittest.TestCase):
             x = 0
             @classmethod

--- a/doc/monkeypatch.txt
+++ b/doc/monkeypatch.txt
@@ -39,8 +39,8 @@ will be undone.
 .. background check:
    $ py.test
    =========================== test session starts ============================
-   platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
-   test path 1: /tmp/doc-exec-528
+   platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
+   test path 1: /tmp/doc-exec-75
    
    =============================  in 0.00 seconds =============================
 

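A minimal, hypothetical use of the ``monkeypatch`` funcarg documented
above; the fake home directory setup is made up for illustration and
relies only on the ``setattr`` helper::

    import os

    def test_expanduser_patched(monkeypatch, tmpdir):
        # point os.path.expanduser at a per-test fake home directory;
        # the modification is undone automatically after the test ends
        fake_home = tmpdir.mkdir("home")
        monkeypatch.setattr(os.path, "expanduser",
                            lambda path: str(fake_home))
        assert os.path.expanduser("~") == str(fake_home)
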
--- a/doc/faq.txt
+++ b/doc/faq.txt
@@ -65,9 +65,9 @@ Is using funcarg- versus xUnit setup a s
 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
 For simple applications and for people experienced with nose_ or
-unittest-style test setup using `xUnit style setup`_
+unittest-style test setup, using `xUnit style setup`_ often
 feels natural.  For larger test suites, parametrized testing
-or setup of complex test resources using funcargs_ is recommended.
+or setup of complex test resources using funcargs_ may feel more natural.
 Moreover, funcargs are ideal for writing advanced test support
 code (like e.g. the monkeypatch_, the tmpdir_ or capture_ funcargs)
 because the support code can register setup/teardown functions
@@ -82,18 +82,17 @@ in a managed class/module/function scope
 Why the ``pytest_funcarg__*`` name for funcarg factories?
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
-When experimenting with funcargs an explicit registration mechanism
-was considered.  But lacking a good use case for this indirection and
-flexibility we decided to go for `Convention over Configuration`_ and
-allow to directly specify the factory.  Besides removing the need
-for an indirection it allows to "grep" for ``pytest_funcarg__MYARG``
-and will safely find all factory functions for the ``MYARG`` function
-argument.  It helps to alleviate the de-coupling of function
-argument usage and creation.
+We originally experimented with an explicit registration mechanism
+for function argument factories.  But lacking a good use case for
+this indirection and flexibility we decided to go for `Convention
+over Configuration`_ and have factories specified by convention.
+Besides removing the need for a registration indirection, the
+convention lets you "grep" for ``pytest_funcarg__MYARG`` and safely
+find all factory functions for the ``MYARG`` function argument.
 
 .. _`Convention over Configuration`: http://en.wikipedia.org/wiki/Convention_over_Configuration
 
-Can I yield multiple values from a factory function?
+Can I yield multiple values from a funcarg factory function?
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
 There are two conceptual reasons why yielding from a factory function
@@ -126,24 +125,16 @@ by pickling and thus implicitely re-impo
 Unfortunately, setuptools-0.6.11 does not ``if __name__=='__main__'``
 protect its generated command line script.  This leads to infinite
 recursion when running a test that instantiates Processes.
-There are these workarounds:
 
-* `install Distribute`_ as a drop-in replacement for setuptools
-  and install py.test
-
-* `directly use a checkout`_ which avoids all setuptools/Distribute
-  installation
-
-If those options are not available to you, you may also manually
+A good solution is to `install Distribute`_ as a drop-in replacement
+for setuptools and then re-install ``pytest``.  Otherwise you could
 fix the script that is created by setuptools by inserting an
 ``if __name__ == '__main__'`` guard.  Or you can create a "pytest.py"
 script with this content and invoke it with your python interpreter::
 
-    import py
+    import pytest
     if __name__ == '__main__':
-        py.cmdline.pytest()
-
-.. _`directly use a checkout`: install.html#directly-use-a-checkout
+        pytest.main()
 
 .. _`install distribute`: http://pypi.python.org/pypi/distribute#installation-instructions
 

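A sketch of the naming convention described above; the ``db``
resource and ``FakeConnection`` class are hypothetical::

    # content of conftest.py (illustrative only)
    def pytest_funcarg__db(request):
        class FakeConnection:
            def close(self):
                pass
        conn = FakeConnection()
        # register teardown: runs after the requesting test finishes
        request.addfinalizer(conn.close)
        return conn

    # a test listing "db" in its signature receives the connection
    def test_query(db):
        assert db is not None

Grepping for ``pytest_funcarg__db`` then finds the factory behind
every test that uses the ``db`` argument.
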
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
 
 from _pytest.config import getcfg, Config
 
@@ -40,7 +40,7 @@ class TestParseIni:
             "*tox.ini:2*requires*9.0*actual*"
         ])
 
-    @py.test.mark.multi(name="setup.cfg tox.ini pytest.ini".split())
+    @pytest.mark.multi(name="setup.cfg tox.ini pytest.ini".split())
     def test_ini_names(self, testdir, name):
         testdir.tmpdir.join(name).write(py.std.textwrap.dedent("""
             [pytest]
@@ -64,7 +64,7 @@ class TestParseIni:
         config.parse([sub])
         assert config.getini("minversion") == "2.0"
 
-    @py.test.mark.xfail(reason="probably not needed")
+    @pytest.mark.xfail(reason="probably not needed")
     def test_confcutdir(self, testdir):
         sub = testdir.mkdir("sub")
         sub.chdir()
@@ -78,7 +78,7 @@ class TestParseIni:
 class TestConfigCmdlineParsing:
     def test_parsing_again_fails(self, testdir):
         config = testdir.reparseconfig([testdir.tmpdir])
-        py.test.raises(AssertionError, "config.parse([])")
+        pytest.raises(AssertionError, "config.parse([])")
 
 
 class TestConfigTmpdir:
@@ -125,21 +125,21 @@ class TestConfigAPI:
         o = testdir.tmpdir
         assert config.getvalue("x") == 1
         assert config.getvalue("x", o.join('sub')) == 2
-        py.test.raises(KeyError, "config.getvalue('y')")
+        pytest.raises(KeyError, "config.getvalue('y')")
         config = testdir.reparseconfig([str(o.join('sub'))])
         assert config.getvalue("x") == 2
         assert config.getvalue("y") == 3
         assert config.getvalue("x", o) == 1
-        py.test.raises(KeyError, 'config.getvalue("y", o)')
+        pytest.raises(KeyError, 'config.getvalue("y", o)')
 
     def test_config_getvalueorskip(self, testdir):
         config = testdir.parseconfig()
-        py.test.raises(py.test.skip.Exception,
+        pytest.raises(pytest.skip.Exception,
             "config.getvalueorskip('hello')")
         verbose = config.getvalueorskip("verbose")
         assert verbose == config.option.verbose
         config.option.hello = None
-        py.test.raises(py.test.skip.Exception,
+        pytest.raises(pytest.skip.Exception,
             "config.getvalueorskip('hello')")
 
     def test_config_overwrite(self, testdir):
@@ -176,7 +176,7 @@ class TestConfigAPI:
         config = testdir.parseconfig()
         val = config.getini("myname")
         assert val == "hello"
-        py.test.raises(ValueError, config.getini, 'other')
+        pytest.raises(ValueError, config.getini, 'other')
 
     def test_addini_pathlist(self, testdir):
         testdir.makeconftest("""
@@ -193,7 +193,7 @@ class TestConfigAPI:
         assert len(l) == 2
         assert l[0] == p.dirpath('hello')
         assert l[1] == p.dirpath('world/sub.py')
-        py.test.raises(ValueError, config.getini, 'other')
+        pytest.raises(ValueError, config.getini, 'other')
 
     def test_addini_args(self, testdir):
         testdir.makeconftest("""

--- a/doc/getting-started.txt
+++ b/doc/getting-started.txt
@@ -14,7 +14,7 @@ Installation options::
 To check your installation has installed the correct version::
 
     $ py.test --version
-    This is py.test version 2.0.0.dev22, imported from /home/hpk/p/pytest/pytest
+    This is py.test version 2.0.0.dev30, imported from /home/hpk/p/pytest/pytest.py
 
 If you get an error, check out :ref:`installation issues`.
 
@@ -34,8 +34,8 @@ That's it. You can execute the test func
 
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
-    test path 1: /tmp/doc-exec-523
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
+    test path 1: /tmp/doc-exec-70
     
     test_sample.py F
     
@@ -79,19 +79,19 @@ If you want to assert some code raises a
 use the ``raises`` helper::
 
     # content of test_sysexit.py
-    import py
+    import pytest
     def f():
         raise SystemExit(1)
 
     def test_mytest():
-        with py.test.raises(SystemExit):
+        with pytest.raises(SystemExit):
             f()
 
 Running it, this time in "quiet" reporting mode::
 
     $ py.test -q test_sysexit.py
     .
-    1 passed in 0.01 seconds
+    1 passed in 0.00 seconds
 
 .. todo:: For further ways to assert exceptions see the `raises`
 
@@ -121,7 +121,7 @@ run the module by passing its filename::
     ================================= FAILURES =================================
     ____________________________ TestClass.test_two ____________________________
     
-    self = <test_class.TestClass instance at 0x254f6c8>
+    self = <test_class.TestClass instance at 0x288fc20>
     
         def test_two(self):
             x = "hello"
@@ -129,7 +129,7 @@ run the module by passing its filename::
     E       assert hasattr('hello', 'check')
     
     test_class.py:8: AssertionError
-    1 failed, 1 passed in 0.03 seconds
+    1 failed, 1 passed in 0.02 seconds
 
 The first test passed, the second failed. Again we can easily see
 the intermediate values used in the assertion, helping us to
@@ -149,7 +149,7 @@ arbitrary resources, for example a uniqu
         assert 0
 
 We list the name ``tmpdir`` in the test function signature and
-py.test will lookup and call a factory to create the resource 
+py.test will look up and call a factory to create the resource
 before performing the test function call.  Let's just run it::
 
     $ py.test -q test_tmpdir.py
@@ -157,7 +157,7 @@ before performing the test function call
     ================================= FAILURES =================================
     _____________________________ test_needsfiles ______________________________
     
-    tmpdir = local('/tmp/pytest-446/test_needsfiles0')
+    tmpdir = local('/tmp/pytest-122/test_needsfiles0')
     
         def test_needsfiles(tmpdir):
             print tmpdir
@@ -166,8 +166,8 @@ before performing the test function call
     
     test_tmpdir.py:3: AssertionError
     ----------------------------- Captured stdout ------------------------------
-    /tmp/pytest-446/test_needsfiles0
-    1 failed in 0.07 seconds
+    /tmp/pytest-122/test_needsfiles0
+    1 failed in 0.05 seconds
 
 Before the test runs, a unique-per-test-invocation temporary directory
 was created.  More info at :ref:`tmpdir handling`.

--- a/testing/test_monkeypatch.py
+++ b/testing/test_monkeypatch.py
@@ -1,12 +1,12 @@
 import os, sys
-import py
+import pytest
 from _pytest.monkeypatch import monkeypatch as MonkeyPatch
 
 def test_setattr():
     class A:
         x = 1
     monkeypatch = MonkeyPatch()
-    py.test.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)")
+    pytest.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)")
     monkeypatch.setattr(A, 'y', 2, raising=False)
     assert A.y == 2
     monkeypatch.undo()
@@ -35,7 +35,7 @@ def test_delattr():
 
     monkeypatch = MonkeyPatch()
     monkeypatch.delattr(A, 'x')
-    py.test.raises(AttributeError, "monkeypatch.delattr(A, 'y')")
+    pytest.raises(AttributeError, "monkeypatch.delattr(A, 'y')")
     monkeypatch.delattr(A, 'y', raising=False)
     monkeypatch.setattr(A, 'x', 5, raising=False)
     assert A.x == 5
@@ -65,7 +65,7 @@ def test_delitem():
     monkeypatch.delitem(d, 'x')
     assert 'x' not in d
     monkeypatch.delitem(d, 'y', raising=False)
-    py.test.raises(KeyError, "monkeypatch.delitem(d, 'y')")
+    pytest.raises(KeyError, "monkeypatch.delitem(d, 'y')")
     assert not d
     monkeypatch.setitem(d, 'y', 1700)
     assert d['y'] == 1700
@@ -87,7 +87,7 @@ def test_delenv():
     name = 'xyz1234'
     assert name not in os.environ
     monkeypatch = MonkeyPatch()
-    py.test.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name)
+    pytest.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name)
     monkeypatch.delenv(name, raising=False)
     monkeypatch.undo()
     os.environ[name] = "1"

--- a/doc/usage.txt
+++ b/doc/usage.txt
@@ -10,7 +10,7 @@ Usage and Invocations
 calling pytest through ``python -m pytest``
 -----------------------------------------------------
 
-.. versionadded: 2.0
+.. versionadded:: 2.0
 
 If you use Python-2.5 or above you can invoke testing through the
 Python interpreter from the command line::
@@ -88,9 +88,10 @@ Setting a breakpoint / aka ``set_trace()
 If you want to set a breakpoint and enter the ``pdb.set_trace()`` you
 can use a helper::
 
+    import pytest
     def test_function():
         ...
-        py.test.set_trace()    # invoke PDB debugger and tracing
+        pytest.set_trace()    # invoke PDB debugger and tracing
 
 .. versionadded:: 2.0.0
 
@@ -140,7 +141,7 @@ Currently only pasting to the http://pas
 calling pytest from Python code
 ----------------------------------------------------
 
-.. versionadded: 2.0
+.. versionadded:: 2.0
 
 You can invoke ``py.test`` from Python code directly::
 

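A minimal sketch of such a programmatic invocation, assuming only the
``pytest.main()`` entry point used elsewhere in this changeset; the
quiet flag and exit code handling are illustrative::

    import pytest

    if __name__ == '__main__':
        # run the test suite in quiet mode; main() returns the exit
        # code that a command line "py.test" run would give
        errno = pytest.main(['-q'])
        raise SystemExit(errno)
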
--- a/testing/test_nose.py
+++ b/testing/test_nose.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
 
 def setup_module(mod):
     mod.nose = py.test.importorskip("nose")

--- a/testing/conftest.py
+++ b/testing/conftest.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
 import sys
 
 pytest_plugins = "pytester",
@@ -91,5 +91,5 @@ def pytest_funcarg__anypython(request):
                 executable = py.path.local(executable)
                 if executable.check():
                     return executable
-        py.test.skip("no %s found" % (name,))
+        pytest.skip("no %s found" % (name,))
     return executable

--- a/testing/test_tmpdir.py
+++ b/testing/test_tmpdir.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
 
 from _pytest.tmpdir import pytest_funcarg__tmpdir
 from _pytest.python import FuncargRequest

--- a/testing/test_pdb.py
+++ b/testing/test_pdb.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
 import sys
 
 class TestPDB:
@@ -23,8 +23,8 @@ class TestPDB:
 
     def test_pdb_on_xfail(self, testdir, pdblist):
         rep = testdir.inline_runsource1('--pdb', """
-            import py
-            @py.test.mark.xfail
+            import pytest
+            @pytest.mark.xfail
             def test_func():
                 assert 0
         """)
@@ -33,16 +33,16 @@ class TestPDB:
 
     def test_pdb_on_skip(self, testdir, pdblist):
         rep = testdir.inline_runsource1('--pdb', """
-            import py
+            import pytest
             def test_func():
-                py.test.skip("hello")
+                pytest.skip("hello")
         """)
         assert rep.skipped
         assert len(pdblist) == 0
 
     def test_pdb_on_BdbQuit(self, testdir, pdblist):
         rep = testdir.inline_runsource1('--pdb', """
-            import py, bdb
+            import bdb
             def test_func():
                 raise bdb.BdbQuit
         """)
@@ -68,15 +68,15 @@ class TestPDB:
 
     def test_pdb_interaction_exception(self, testdir):
         p1 = testdir.makepyfile("""
-            import py
+            import pytest
             def globalfunc():
                 pass
             def test_1():
-                py.test.raises(ValueError, globalfunc)
+                pytest.raises(ValueError, globalfunc)
         """)
         child = testdir.spawn_pytest("--pdb %s" % p1)
         child.expect(".*def test_1")
-        child.expect(".*py.test.raises.*globalfunc")
+        child.expect(".*pytest.raises.*globalfunc")
         child.expect("(Pdb)")
         child.sendline("globalfunc")
         child.expect(".*function")
@@ -87,11 +87,11 @@ class TestPDB:
 
     def test_pdb_interaction_capturing_simple(self, testdir):
         p1 = testdir.makepyfile("""
-            import py
+            import pytest
             def test_1():
                 i = 0
                 print ("hello17")
-                py.test.set_trace()
+                pytest.set_trace()
                 x = 3
         """)
         child = testdir.spawn_pytest(str(p1))
@@ -108,14 +108,14 @@ class TestPDB:
 
     def test_pdb_interaction_capturing_twice(self, testdir):
         p1 = testdir.makepyfile("""
-            import py
+            import pytest
             def test_1():
                 i = 0
                 print ("hello17")
-                py.test.set_trace()
+                pytest.set_trace()
                 x = 3
                 print ("hello18")
-                py.test.set_trace()
+                pytest.set_trace()
                 x = 4
         """)
         child = testdir.spawn_pytest(str(p1))
@@ -135,8 +135,8 @@ class TestPDB:
 
     def test_pdb_used_outside_test(self, testdir):
         p1 = testdir.makepyfile("""
-            import py
-            py.test.set_trace()
+            import pytest
+            pytest.set_trace()
             x = 5
         """)
         child = testdir.spawn("%s %s" %(sys.executable, p1))

--- a/testing/test_mark.py
+++ b/testing/test_mark.py
@@ -1,10 +1,10 @@
-import py
+import py, pytest
 from _pytest.mark import MarkGenerator as Mark
 
 class TestMark:
     def test_pytest_mark_notcallable(self):
         mark = Mark()
-        py.test.raises((AttributeError, TypeError), "mark()")
+        pytest.raises((AttributeError, TypeError), "mark()")
 
     def test_pytest_mark_bare(self):
         mark = Mark()
@@ -47,8 +47,8 @@ class TestMark:
 class TestFunctional:
     def test_mark_per_function(self, testdir):
         p = testdir.makepyfile("""
-            import py
-            @py.test.mark.hello
+            import pytest
+            @pytest.mark.hello
             def test_hello():
                 assert hasattr(test_hello, 'hello')
         """)
@@ -57,8 +57,8 @@ class TestFunctional:
 
     def test_mark_per_module(self, testdir):
         item = testdir.getitem("""
-            import py
-            pytestmark = py.test.mark.hello
+            import pytest
+            pytestmark = pytest.mark.hello
             def test_func():
                 pass
         """)
@@ -67,9 +67,9 @@ class TestFunctional:
 
     def test_marklist_per_class(self, testdir):
         item = testdir.getitem("""
-            import py
+            import pytest
             class TestClass:
-                pytestmark = [py.test.mark.hello, py.test.mark.world]
+                pytestmark = [pytest.mark.hello, pytest.mark.world]
                 def test_func(self):
                     assert TestClass.test_func.hello
                     assert TestClass.test_func.world
@@ -79,8 +79,8 @@ class TestFunctional:
 
     def test_marklist_per_module(self, testdir):
         item = testdir.getitem("""
-            import py
-            pytestmark = [py.test.mark.hello, py.test.mark.world]
+            import pytest
+            pytestmark = [pytest.mark.hello, pytest.mark.world]
             class TestClass:
                 def test_func(self):
                     assert TestClass.test_func.hello
@@ -90,11 +90,11 @@ class TestFunctional:
         assert 'hello' in keywords
         assert 'world' in keywords
 
-    @py.test.mark.skipif("sys.version_info < (2,6)")
+    @pytest.mark.skipif("sys.version_info < (2,6)")
     def test_mark_per_class_decorator(self, testdir):
         item = testdir.getitem("""
-            import py
-            @py.test.mark.hello
+            import pytest
+            @pytest.mark.hello
             class TestClass:
                 def test_func(self):
                     assert TestClass.test_func.hello
@@ -102,13 +102,13 @@ class TestFunctional:
         keywords = item.keywords
         assert 'hello' in keywords
 
-    @py.test.mark.skipif("sys.version_info < (2,6)")
+    @pytest.mark.skipif("sys.version_info < (2,6)")
     def test_mark_per_class_decorator_plus_existing_dec(self, testdir):
         item = testdir.getitem("""
-            import py
-            @py.test.mark.hello
+            import pytest
+            @pytest.mark.hello
             class TestClass:
-                pytestmark = py.test.mark.world
+                pytestmark = pytest.mark.world
                 def test_func(self):
                     assert TestClass.test_func.hello
                     assert TestClass.test_func.world
@@ -119,12 +119,12 @@ class TestFunctional:
 
     def test_merging_markers(self, testdir):
         p = testdir.makepyfile("""
-            import py
-            pytestmark = py.test.mark.hello("pos1", x=1, y=2)
+            import pytest
+            pytestmark = pytest.mark.hello("pos1", x=1, y=2)
             class TestClass:
                 # classlevel overrides module level
-                pytestmark = py.test.mark.hello(x=3)
-                @py.test.mark.hello("pos0", z=4)
+                pytestmark = pytest.mark.hello(x=3)
+                @pytest.mark.hello("pos0", z=4)
                 def test_func(self):
                     pass
         """)
@@ -136,9 +136,9 @@ class TestFunctional:
         assert marker.kwargs == {'x': 3, 'y': 2, 'z': 4}
 
     def test_mark_other(self, testdir):
-        py.test.raises(TypeError, '''
+        pytest.raises(TypeError, '''
             testdir.getitem("""
-                import py
+                import pytest
                 class pytestmark:
                     pass
                 def test_func():
@@ -148,9 +148,9 @@ class TestFunctional:
 
     def test_mark_dynamically_in_funcarg(self, testdir):
         testdir.makeconftest("""
-            import py
+            import pytest
             def pytest_funcarg__arg(request):
-                request.applymarker(py.test.mark.hello)
+                request.applymarker(pytest.mark.hello)
             def pytest_terminal_summary(terminalreporter):
                 l = terminalreporter.stats['passed']
                 terminalreporter._tw.line("keyword: %s" % l[0].keywords)
@@ -187,7 +187,7 @@ class Test_genitems:
         # do we want to unify behaviour with
         # test_subdir_conftest_error?
         p = testdir.makepyfile(conftest="raise SyntaxError\n")
-        py.test.raises(SyntaxError, testdir.inline_genitems, p.dirpath())
+        pytest.raises(SyntaxError, testdir.inline_genitems, p.dirpath())
 
     def test_example_items1(self, testdir):
         p = testdir.makepyfile('''
@@ -247,7 +247,6 @@ class TestKeywordSelection:
                     pass
         """)
         testdir.makepyfile(conftest="""
-            import py
             def pytest_pycollect_makeitem(__multicall__, name):
                 if name == "TestClass":
                     item = __multicall__.execute()

--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -130,7 +130,7 @@ class TestGeneralUsage:
         assert result.ret != 0
         assert "should be seen" in result.stdout.str()
 
-    @py.test.mark.skipif("not hasattr(os, 'symlink')")
+    @pytest.mark.skipif("not hasattr(os, 'symlink')")
     def test_chdir(self, testdir):
         testdir.tmpdir.join("py").mksymlinkto(py._pydir)
         p = testdir.tmpdir.join("main.py")
@@ -273,7 +273,7 @@ class TestInvocationVariants:
 
     def test_double_pytestcmdline(self, testdir):
         p = testdir.makepyfile(run="""
-            import py, pytest
+            import pytest
             pytest.main()
             pytest.main()
         """)
@@ -287,19 +287,19 @@ class TestInvocationVariants:
             "*1 passed*",
         ])
 
-    @py.test.mark.skipif("sys.version_info < (2,5)")
+    @pytest.mark.skipif("sys.version_info < (2,5)")
     def test_python_minus_m_invocation_ok(self, testdir):
         p1 = testdir.makepyfile("def test_hello(): pass")
         res = testdir.run(py.std.sys.executable, "-m", "py.test", str(p1))
         assert res.ret == 0
 
-    @py.test.mark.skipif("sys.version_info < (2,5)")
+    @pytest.mark.skipif("sys.version_info < (2,5)")
     def test_python_minus_m_invocation_fail(self, testdir):
         p1 = testdir.makepyfile("def test_fail(): 0/0")
         res = testdir.run(py.std.sys.executable, "-m", "py.test", str(p1))
         assert res.ret == 1
 
-    @py.test.mark.skipif("sys.version_info < (2,5)")
+    @pytest.mark.skipif("sys.version_info < (2,5)")
     def test_python_pytest_package(self, testdir):
         p1 = testdir.makepyfile("def test_pass(): pass")
         res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
@@ -359,7 +359,7 @@ class TestInvocationVariants:
         ])
 
 
-    @py.test.mark.xfail(reason="decide: feature or bug")
+    @pytest.mark.xfail(reason="decide: feature or bug")
     def test_noclass_discovery_if_not_testcase(self, testdir):
         testpath = testdir.makepyfile("""
             import unittest

--- a/testing/test_doctest.py
+++ b/testing/test_doctest.py
@@ -1,5 +1,5 @@
 from _pytest.doctest import DoctestModule, DoctestTextfile
-import py
+import py, pytest
 
 class TestDoctests:
 
@@ -58,7 +58,7 @@ class TestDoctests:
         assert call.report.longrepr
         # XXX
         #testitem, = items
-        #excinfo = py.test.raises(Failed, "testitem.runtest()")
+        #excinfo = pytest.raises(Failed, "testitem.runtest()")
         #repr = testitem.repr_failure(excinfo, ("", ""))
         #assert repr.reprlocation
 

--- a/doc/example/controlskip.txt
+++ b/doc/example/controlskip.txt
@@ -8,22 +8,22 @@ Here is a ``conftest.py`` file adding a 
 line option to control skipping of ``slow`` marked tests::
 
     # content of conftest.py
-    
-    import py
+
+    import pytest
     def pytest_addoption(parser):
         parser.addoption("--runslow", action="store_true",
             help="run slow tests")
 
     def pytest_runtest_setup(item):
         if 'slow' in item.keywords and not item.config.getvalue("runslow"):
-            py.test.skip("need --runslow option to run")
+            pytest.skip("need --runslow option to run")
 
 We can now write a test module like this::
 
     # content of test_module.py
-    
-    import py
-    slow = py.test.mark.slow
+
+    import pytest
+    slow = pytest.mark.slow
 
     def test_func_fast():
         pass
@@ -34,14 +34,14 @@ We can now write a test module like this
 
 and when running it we will see a skipped "slow" test::
 
-    $ py.test test_module.py -rs    # "-rs" means report on the little 's'
+    $ py.test test_module.py -rs    # "-rs" means report details on skipped tests (the little 's')
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_module.py
     
     test_module.py .s
     ========================= short test summary info ==========================
-    SKIP [1] /tmp/doc-exec-557/conftest.py:9: need --runslow option to run
+    SKIP [1] /tmp/doc-exec-104/conftest.py:9: need --runslow option to run
     
     =================== 1 passed, 1 skipped in 0.02 seconds ====================
 
@@ -49,7 +49,7 @@ Or run it including the ``slow`` marked 
 
     $ py.test test_module.py --runslow
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_module.py
     
     test_module.py ..

--- a/testing/test_helpconfig.py
+++ b/testing/test_helpconfig.py
@@ -43,8 +43,8 @@ def test_hookvalidation_unknown(testdir)
 
 def test_hookvalidation_optional(testdir):
     testdir.makeconftest("""
-        import py
-        @py.test.mark.optionalhook
+        import pytest
+        @pytest.mark.optionalhook
         def pytest_hello(xyz):
             pass
     """)

--- a/doc/features.txt
+++ b/doc/features.txt
@@ -15,11 +15,11 @@ no-boilerplate testing with Python
 extensive plugin and customization system
 ------------------------------------------------------
 
-.. _`suprisingly easy`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html
+.. _`easy`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html
 
 - all collection, reporting, running aspects are delegated to hook functions
-- hook functions are defined per-directory, per-project or through PyPI released plugins
-- it is `suprisingly easy`_ to add command line options or
+- hook functions are implemented per-directory, per-project or through PyPI-released plugins
+- it is `easy`_ to add command line options or
   do other kinds of add-ons and customizations.
 
 mature command line testing tool
@@ -42,24 +42,20 @@ integrates well with CI systems
 .. _`tox`: http://codespeak.net/tox
 
 
-supports several testing practises and methods
+supports common testing practices and methods
 -----------------------------------------------------------
 
 - supports extended `xUnit style setup`_
-- can integrate nose_, `unittest.py` and `doctest.py`_ style tests
+- can integrate ``nose``, ``unittest.py`` and ``doctest.py`` style tests
 - supports generating testing coverage reports
 - supports :ref:`non-python tests`
 - `Javascript unit- and functional testing`_
 
-.. _`Javasript unit- and functional testing`: plugin/oejskit.html
-.. _`coverage testing with figleaf`: plugin/figleaf.html
-.. _`unittest.py`: http://docs.python.org/library/unittest.html
+.. _`Javascript unit- and functional testing`: http://pypi.python.org/pypi/oejskit
 
 distributing tests to local/remote subprocesses
 --------------------------------------------------------
 
-.. _`pytest-xdist`: plugin/xdist.html
-
 - distribute tests to multiple CPUs
 - distribute tests to remote ssh or socket connected machines
 - run tests in subprocess, re-run failing ones on file-change

--- a/testing/test_capture.py
+++ b/testing/test_capture.py
@@ -1,7 +1,7 @@
-import py, os, sys
+import pytest, py, os, sys
 from _pytest.capture import CaptureManager
 
-needsosdup = py.test.mark.xfail("not hasattr(os, 'dup')")
+needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')")
 
 class TestCaptureManager:
     def test_getmethod_default_no_fd(self, testdir, monkeypatch):
@@ -33,7 +33,7 @@ class TestCaptureManager:
             assert capman._getmethod(config, sub.join("test_hello.py")) == name
 
     @needsosdup
-    @py.test.mark.multi(method=['no', 'fd', 'sys'])
+    @pytest.mark.multi(method=['no', 'fd', 'sys'])
     def test_capturing_basic_api(self, method):
         capouter = py.io.StdCaptureFD()
         old = sys.stdout, sys.stderr, sys.stdin
@@ -63,8 +63,8 @@ class TestCaptureManager:
             config = testdir.parseconfig(testdir.tmpdir)
             capman = CaptureManager()
             capman.resumecapture("fd")
-            py.test.raises(ValueError, 'capman.resumecapture("fd")')
-            py.test.raises(ValueError, 'capman.resumecapture("sys")')
+            pytest.raises(ValueError, 'capman.resumecapture("fd")')
+            pytest.raises(ValueError, 'capman.resumecapture("sys")')
             os.write(1, "hello\n".encode('ascii'))
             out, err = capman.suspendcapture()
             assert out == "hello\n"
@@ -77,7 +77,7 @@ class TestCaptureManager:
         finally:
             capouter.reset()
 
-@py.test.mark.multi(method=['fd', 'sys'])
+@pytest.mark.multi(method=['fd', 'sys'])
 def test_capturing_unicode(testdir, method):
     if sys.version_info >= (3,0):
         obj = "'b\u00f6y'"
@@ -96,7 +96,7 @@ def test_capturing_unicode(testdir, meth
         "*1 passed*"
     ])
 
-@py.test.mark.multi(method=['fd', 'sys'])
+@pytest.mark.multi(method=['fd', 'sys'])
 def test_capturing_bytes_in_utf8_encoding(testdir, method):
     testdir.makepyfile("""
         def test_unicode():
@@ -141,7 +141,7 @@ class TestPerTestCapturing:
             "in func2*",
         ])
 
-    @py.test.mark.xfail
+    @pytest.mark.xfail
     def test_capture_scope_cache(self, testdir):
         p = testdir.makepyfile("""
             import sys
@@ -245,7 +245,7 @@ class TestLoggingInteraction:
         p = testdir.makepyfile("""
             def test_logging():
                 import logging
-                import py
+                import py, pytest
                 stream = py.io.TextIO()
                 logging.basicConfig(stream=stream)
                 stream.close() # to free memory/release resources

--- a/testing/test_recwarn.py
+++ b/testing/test_recwarn.py
@@ -1,4 +1,4 @@
-import py
+import py, pytest
 from _pytest.recwarn import WarningsRecorder
 
 def test_WarningRecorder(recwarn):
@@ -16,7 +16,7 @@ def test_WarningRecorder(recwarn):
     rec.clear()
     assert len(rec.list) == 0
     assert l is rec.list
-    py.test.raises(AssertionError, "rec.pop()")
+    pytest.raises(AssertionError, "rec.pop()")
     rec.finalize()
     assert showwarning == py.std.warnings.showwarning
 
@@ -52,7 +52,7 @@ def dep_explicit(i):
                                       filename="hello", lineno=3)
 
 def test_deprecated_call_raises():
-    excinfo = py.test.raises(AssertionError,
+    excinfo = pytest.raises(AssertionError,
                    "py.test.deprecated_call(dep, 3)")
     assert str(excinfo).find("did not produce") != -1
 
@@ -72,7 +72,7 @@ def test_deprecated_call_preserves():
     assert f == py.std.warnings.filters
 
 def test_deprecated_explicit_call_raises():
-    py.test.raises(AssertionError,
+    pytest.raises(AssertionError,
                    "py.test.deprecated_call(dep_explicit, 3)")
 
 def test_deprecated_explicit_call():

--- a/doc/recwarn.txt
+++ b/doc/recwarn.txt
@@ -31,8 +31,8 @@ You can also call a global helper for ch
 that a certain function call triggers a Deprecation
 warning::
 
-    import py
+    import pytest
 
     def test_global():
-        py.test.deprecated_call(myfunction, 17)
+        pytest.deprecated_call(myfunction, 17)
 

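``myfunction`` above is left undefined; a hypothetical deprecated
function that would satisfy the check could look like::

    import warnings

    def myfunction(x):
        # emit the DeprecationWarning that deprecated_call expects
        warnings.warn("myfunction() is deprecated", DeprecationWarning)
        return x
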
--- a/doc/example/nonpython.txt
+++ b/doc/example/nonpython.txt
@@ -27,7 +27,7 @@ now execute the test specification::
 
     nonpython $ py.test test_simple.yml
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
     test path 1: test_simple.yml
     
     test_simple.yml .F
@@ -39,7 +39,7 @@ now execute the test specification::
        no further details known at this point.
     ========================= short test summary info ==========================
     FAIL test_simple.yml::hello
-    ==================== 1 failed, 1 passed in 0.43 seconds ====================
+    ==================== 1 failed, 1 passed in 0.06 seconds ====================
 
 You get one dot for the passing ``sub1: sub1`` check and one failure.
 Obviously in the above ``conftest.py`` you'll want to implement a more
@@ -58,11 +58,11 @@ reporting in ``verbose`` mode::
 
     nonpython $ py.test -v
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev22 -- /home/hpk/venv/0/bin/python
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30 -- /home/hpk/venv/0/bin/python
     test path 1: /home/hpk/p/pytest/doc/example/nonpython
     
-    test_simple.yml <- test_simple.yml:1: usecase: ok PASSED
-    test_simple.yml <- test_simple.yml:1: usecase: hello FAILED
+    test_simple.yml:1: usecase: ok PASSED
+    test_simple.yml:1: usecase: hello FAILED
     
     ================================= FAILURES =================================
     ______________________________ usecase: hello ______________________________
@@ -71,7 +71,7 @@ reporting in ``verbose`` mode::
        no further details known at this point.
     ========================= short test summary info ==========================
     FAIL test_simple.yml::hello
-    ==================== 1 failed, 1 passed in 0.07 seconds ====================
+    ==================== 1 failed, 1 passed in 0.06 seconds ====================
 
 While developing your custom test collection and execution it's also
 interesting to just look at the collection tree::

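The ``conftest.py`` driving this example is not shown in the patch; a
sketch along the lines of the distributed nonpython example, assuming
a ``yaml`` dependency and made-up class names::

    # content of conftest.py (sketch, not part of this changeset)
    import pytest

    def pytest_collect_file(path, parent):
        # collect test*.yml specification files next to python tests
        if path.ext == ".yml" and path.basename.startswith("test"):
            return YamlFile(path, parent)

    class YamlFile(pytest.File):
        def collect(self):
            import yaml  # third-party dependency of this example
            raw = yaml.load(self.fspath.open())
            for name, spec in raw.items():
                yield YamlItem(name, self, spec)

    class YamlItem(pytest.Item):
        def __init__(self, name, parent, spec):
            super(YamlItem, self).__init__(name, parent)
            self.spec = spec

        def runtest(self):
            for key, value in self.spec.items():
                # trivial check: every key must equal its value
                if key != value:
                    raise YamlException(self, key, value)

        def repr_failure(self, excinfo):
            """ called when self.runtest() raises an exception. """
            if isinstance(excinfo.value, YamlException):
                return "\n".join([
                    "usecase execution failed",
                    "   spec failed: %r: %r" % excinfo.value.args[1:3],
                    "   no further details known at this point.",
                ])

    class YamlException(Exception):
        """ custom exception for error reporting. """
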
--- a/testing/test_python.py
+++ b/testing/test_python.py
@@ -4,8 +4,8 @@ from _pytest import python as funcargs
 class TestModule:
     def test_failing_import(self, testdir):
         modcol = testdir.getmodulecol("import alksdjalskdjalkjals")
-        py.test.raises(ImportError, modcol.collect)
-        py.test.raises(ImportError, modcol.collect)
+        pytest.raises(ImportError, modcol.collect)
+        pytest.raises(ImportError, modcol.collect)
 
     def test_import_duplicate(self, testdir):
         a = testdir.mkdir("a")
@@ -26,12 +26,12 @@ class TestModule:
 
     def test_syntax_error_in_module(self, testdir):
         modcol = testdir.getmodulecol("this is a syntax error")
-        py.test.raises(modcol.CollectError, modcol.collect)
-        py.test.raises(modcol.CollectError, modcol.collect)
+        pytest.raises(modcol.CollectError, modcol.collect)
+        pytest.raises(modcol.CollectError, modcol.collect)
 
     def test_module_considers_pluginmanager_at_import(self, testdir):
         modcol = testdir.getmodulecol("pytest_plugins='xasdlkj',")
-        py.test.raises(ImportError, "modcol.obj")
+        pytest.raises(ImportError, "modcol.obj")
 
 class TestClass:
     def test_class_with_init_not_collected(self, testdir):
@@ -119,7 +119,7 @@ class TestGenerator:
         assert len(colitems) == 1
         gencol = colitems[0]
         assert isinstance(gencol, pytest.Generator)
-        py.test.raises(ValueError, "gencol.collect()")
+        pytest.raises(ValueError, "gencol.collect()")
 
     def test_generative_methods_with_explicit_names(self, testdir):
         modcol = testdir.getmodulecol("""
@@ -144,7 +144,7 @@ class TestGenerator:
     def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir):
         o = testdir.makepyfile("""
             def test_generative_order_of_execution():
-                import py
+                import py, pytest
                 test_list = []
                 expected_list = list(range(6))
 
@@ -168,7 +168,7 @@ class TestGenerator:
     def test_order_of_execution_generator_different_codeline(self, testdir):
         o = testdir.makepyfile("""
             def test_generative_tests_different_codeline():
-                import py
+                import py, pytest
                 test_list = []
                 expected_list = list(range(3))
 
@@ -351,7 +351,7 @@ def test_setup_only_available_in_subdir(
     sub1 = testdir.mkpydir("sub1")
     sub2 = testdir.mkpydir("sub2")
     sub1.join("conftest.py").write(py.code.Source("""
-        import py
+        import pytest
         def pytest_runtest_setup(item):
             assert item.fspath.purebasename == "test_in_sub1"
         def pytest_runtest_call(item):
@@ -360,7 +360,7 @@ def test_setup_only_available_in_subdir(
             assert item.fspath.purebasename == "test_in_sub1"
     """))
     sub2.join("conftest.py").write(py.code.Source("""
-        import py
+        import pytest
         def pytest_runtest_setup(item):
             assert item.fspath.purebasename == "test_in_sub2"
         def pytest_runtest_call(item):
@@ -402,7 +402,7 @@ def test_modulecol_roundtrip(testdir):
 
 class TestTracebackCutting:
     def test_skip_simple(self):
-        excinfo = py.test.raises(py.test.skip.Exception, 'py.test.skip("xxx")')
+        excinfo = pytest.raises(pytest.skip.Exception, 'pytest.skip("xxx")')
         assert excinfo.traceback[-1].frame.code.name == "skip"
         assert excinfo.traceback[-1].ishidden()
 
@@ -480,7 +480,7 @@ class TestFillFuncArgs:
                 return 42
         """)
         item = testdir.getitem("def test_func(some): pass")
-        exc = py.test.raises(funcargs.FuncargRequest.LookupError,
+        exc = pytest.raises(funcargs.FuncargRequest.LookupError,
             "funcargs.fillfuncargs(item)")
         s = str(exc.value)
         assert s.find("xyzsomething") != -1
@@ -611,7 +611,7 @@ class TestRequest:
             def test_func(something): pass
         """)
         req = funcargs.FuncargRequest(item)
-        py.test.raises(req.LookupError, req.getfuncargvalue, "notexists")
+        pytest.raises(req.LookupError, req.getfuncargvalue, "notexists")
         val = req.getfuncargvalue("something")
         assert val == 1
         val = req.getfuncargvalue("something")
@@ -672,12 +672,12 @@ def test_applymarker(testdir):
     """)
     req1 = funcargs.FuncargRequest(item1)
     assert 'xfail' not in item1.keywords
-    req1.applymarker(py.test.mark.xfail)
+    req1.applymarker(pytest.mark.xfail)
     assert 'xfail' in item1.keywords
     assert 'skipif' not in item1.keywords
-    req1.applymarker(py.test.mark.skipif)
+    req1.applymarker(pytest.mark.skipif)
     assert 'skipif' in item1.keywords
-    py.test.raises(ValueError, "req1.applymarker(42)")
+    pytest.raises(ValueError, "req1.applymarker(42)")
 
 class TestRequestCachedSetup:
     def test_request_cachedsetup(self, testdir):
@@ -815,11 +815,11 @@ class TestMetafunc:
     def test_addcall_id(self):
         def func(arg1): pass
         metafunc = funcargs.Metafunc(func)
-        py.test.raises(ValueError, "metafunc.addcall(id=None)")
+        pytest.raises(ValueError, "metafunc.addcall(id=None)")
 
         metafunc.addcall(id=1)
-        py.test.raises(ValueError, "metafunc.addcall(id=1)")
-        py.test.raises(ValueError, "metafunc.addcall(id='1')")
+        pytest.raises(ValueError, "metafunc.addcall(id=1)")
+        pytest.raises(ValueError, "metafunc.addcall(id='1')")
         metafunc.addcall(id=2)
         assert len(metafunc._calls) == 2
         assert metafunc._calls[0].id == "1"
@@ -852,7 +852,7 @@ class TestGenfuncFunctional:
     def test_attributes(self, testdir):
         p = testdir.makepyfile("""
             # assumes that generate/provide runs in the same process
-            import py
+            import py, pytest
             def pytest_generate_tests(metafunc):
                 metafunc.addcall(param=metafunc)
 
@@ -990,14 +990,14 @@ def test_conftest_funcargs_only_availabl
     sub1 = testdir.mkpydir("sub1")
     sub2 = testdir.mkpydir("sub2")
     sub1.join("conftest.py").write(py.code.Source("""
-        import py
+        import pytest
         def pytest_funcarg__arg1(request):
-            py.test.raises(Exception, "request.getfuncargvalue('arg2')")
+            pytest.raises(Exception, "request.getfuncargvalue('arg2')")
     """))
     sub2.join("conftest.py").write(py.code.Source("""
-        import py
+        import pytest
         def pytest_funcarg__arg2(request):
-            py.test.raises(Exception, "request.getfuncargvalue('arg1')")
+            pytest.raises(Exception, "request.getfuncargvalue('arg1')")
     """))
 
     sub1.join("test_in_sub1.py").write("def test_1(arg1): pass")
@@ -1121,50 +1121,50 @@ def test_show_funcarg(testdir):
 class TestRaises:
     def test_raises(self):
         source = "int('qwe')"
-        excinfo = py.test.raises(ValueError, source)
+        excinfo = pytest.raises(ValueError, source)
         code = excinfo.traceback[-1].frame.code
         s = str(code.fullsource)
         assert s == source
 
     def test_raises_exec(self):
-        py.test.raises(ValueError, "a,x = []")
+        pytest.raises(ValueError, "a,x = []")
 
     def test_raises_syntax_error(self):
-        py.test.raises(SyntaxError, "qwe qwe qwe")
+        pytest.raises(SyntaxError, "qwe qwe qwe")
 
     def test_raises_function(self):
-        py.test.raises(ValueError, int, 'hello')
+        pytest.raises(ValueError, int, 'hello')
 
     def test_raises_callable_no_exception(self):
         class A:
             def __call__(self):
                 pass
         try:
-            py.test.raises(ValueError, A())
-        except py.test.raises.Exception:
+            pytest.raises(ValueError, A())
+        except pytest.raises.Exception:
             pass
 
-    @py.test.mark.skipif('sys.version < "2.5"')
+    @pytest.mark.skipif('sys.version < "2.5"')
     def test_raises_as_contextmanager(self, testdir):
         testdir.makepyfile("""
             from __future__ import with_statement
-            import py
+            import py, pytest
 
             def test_simple():
-                with py.test.raises(ZeroDivisionError) as excinfo:
+                with pytest.raises(ZeroDivisionError) as excinfo:
                     assert isinstance(excinfo, py.code.ExceptionInfo)
                     1/0
                 print (excinfo)
                 assert excinfo.type == ZeroDivisionError
 
             def test_noraise():
-                with py.test.raises(py.test.raises.Exception):
-                    with py.test.raises(ValueError):
+                with pytest.raises(pytest.raises.Exception):
+                    with pytest.raises(ValueError):
                            int()
 
             def test_raise_wrong_exception_passes_by():
-                with py.test.raises(ZeroDivisionError):
-                    with py.test.raises(ValueError):
+                with pytest.raises(ZeroDivisionError):
+                    with pytest.raises(ValueError):
                            1/0
         """)
         result = testdir.runpytest()

--- a/testing/test_collection.py
+++ b/testing/test_collection.py
@@ -178,7 +178,7 @@ class TestPrunetraceback:
             "*hello world*",
         ])
 
-    @py.test.mark.xfail(reason="other mechanism for adding to reporting needed")
+    @pytest.mark.xfail(reason="other mechanism for adding to reporting needed")
     def test_collect_report_postprocessing(self, testdir):
         p = testdir.makepyfile("""
             import not_exists
@@ -520,7 +520,7 @@ class Test_genitems:
         # do we want to unify behaviour with
         # test_subdir_conftest_error?
         p = testdir.makepyfile(conftest="raise SyntaxError\n")
-        py.test.raises(SyntaxError, testdir.inline_genitems, p.dirpath())
+        pytest.raises(SyntaxError, testdir.inline_genitems, p.dirpath())
 
     def test_example_items1(self, testdir):
         p = testdir.makepyfile('''


