[py-svn] commit/pytest: 6 new changesets

Bitbucket commits-noreply at bitbucket.org
Sat Nov 12 00:03:27 CET 2011


6 new commits in pytest:


https://bitbucket.org/hpk42/pytest/changeset/298fbbd8c125/
changeset:   298fbbd8c125
user:        hpk42
date:        2011-11-09 12:04:37
summary:     fix formatting
affected #:  1 file

diff -r 6a24dc78293dda5cd7ba556e4dc454bdb018eb4b -r 298fbbd8c125c5ca8ac4808f13f718f0d03965c6 _pytest/runner.py
--- a/_pytest/runner.py
+++ b/_pytest/runner.py
@@ -17,7 +17,7 @@
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting", "reporting", after="general")
     group.addoption('--durations',
-         action="store", type="int", dest="durations", default=None, metavar="N",
+         action="store", type="int", default=None, metavar="N",
          help="show N slowest setup/test durations (N=0 for all)."),
 
 def pytest_terminal_summary(terminalreporter):
@@ -35,23 +35,16 @@
     d2 = list(duration2rep.items())
     d2.sort()
     d2.reverse()
-    #remaining = []
     if not durations:
         tr.write_sep("=", "slowest test durations")
     else:
         tr.write_sep("=", "slowest %s test durations" % durations)
-        #remaining = d2[durations:]
         d2 = d2[:durations]
 
     for duration, rep in d2:
         nodeid = rep.nodeid.replace("::()::", "::")
         tr.write_line("%02.2fs %s %s" %
             (duration, rep.when, nodeid))
-    #if remaining:
-    #    remsum = sum(map(lambda x: x[0], remaining))
-    #    tr.write_line("%02.2fs spent in %d remaining test phases" %(
-    #        remsum, len(remaining)))
-
 
 def pytest_sessionstart(session):
     session._setupstate = SetupState()



https://bitbucket.org/hpk42/pytest/changeset/40c7458fb0c8/
changeset:   40c7458fb0c8
user:        hpk42
date:        2011-11-11 22:33:45
summary:     skip pexpect tests on darwin
affected #:  1 file

diff -r 298fbbd8c125c5ca8ac4808f13f718f0d03965c6 -r 40c7458fb0c8dc5d0bf4e90e36133716fe0afe33 _pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -516,6 +516,8 @@
         pexpect = py.test.importorskip("pexpect", "2.4")
         if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine():
             pytest.skip("pypy-64 bit not supported")
+        if sys.platform == "darwin":
+            pytest.xfail("pexpect does not work reliably on darwin?!")
         logfile = self.tmpdir.join("spawn.out")
         child = pexpect.spawn(cmd, logfile=logfile.open("w"))
         child.timeout = expect_timeout



https://bitbucket.org/hpk42/pytest/changeset/7535fc2cb387/
changeset:   7535fc2cb387
user:        hpk42
date:        2011-11-11 23:56:06
summary:     improve mark.txt document and add new registration/markers features.
(welcome to documentation driven development)
affected #:  4 files

diff -r 40c7458fb0c8dc5d0bf4e90e36133716fe0afe33 -r 7535fc2cb387702e00924bad69681072a270f48a doc/example/index.txt
--- a/doc/example/index.txt
+++ b/doc/example/index.txt
@@ -18,5 +18,6 @@
    simple.txt
    mysetup.txt
    parametrize.txt
+   markers.txt
    pythoncollection.txt
    nonpython.txt


diff -r 40c7458fb0c8dc5d0bf4e90e36133716fe0afe33 -r 7535fc2cb387702e00924bad69681072a270f48a doc/example/markers.txt
--- /dev/null
+++ b/doc/example/markers.txt
@@ -0,0 +1,83 @@
+
+Working with custom markers
+=================================================
+
+
+Here are some example using the :ref:`mark` mechanism.
+
+.. _`adding a custom marker from a plugin`:
+
+custom marker and command line option to control test runs
+----------------------------------------------------------
+
+Plugins can provide custom markers and implement specific behaviour
+based on it. This is a self-contained example which adds a command
+line option and a parametrized test function marker to run tests
+specified via named environments::
+
+    # content of conftest.py
+
+    import pytest
+    def pytest_addoption(parser):
+        parser.addoption("-E", dest="env", action="store", metavar="NAME",
+            help="only run tests matching the environment NAME.")
+
+    def pytest_configure(config):
+        # register an additional marker
+        config.addinivalue_line("markers", 
+            "env(name): mark test to run only on named environment")
+
+    def pytest_runtest_setup(item):
+        if not isinstance(item, item.Function):
+            return
+        if hasattr(item.obj, 'env'):
+            envmarker = getattr(item.obj, 'env')
+            envname = envmarker.args[0]
+            if envname != item.config.option.env:
+                pytest.skip("test requires env %r" % envname)
+
+A test file using this local plugin::
+
+    # content of test_someenv.py
+
+    import pytest
+    @pytest.mark.env("stage1")
+    def test_basic_db_operation():
+        pass
+
+and an example invocation specifying a different environment than what
+the test needs::
+
+    $ py.test -E stage2
+    ============================= test session starts ==============================
+    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6
+    collecting ... collected 1 items
+    
+    test_someenv.py s
+    
+    ========================== 1 skipped in 0.02 seconds ===========================
+  
+and here is one that specifies exactly the environment needed::
+
+    $ py.test -E stage1
+    ============================= test session starts ==============================
+    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6
+    collecting ... collected 1 items
+    
+    test_someenv.py .
+    
+    =========================== 1 passed in 0.02 seconds ===========================
+
+The ``--markers`` option always gives you a list of available markers::
+
+    $ py.test --markers
+    @pytest.mark.env(name): mark test to run only on named environment
+    
+    @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value.  Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. 
+    
+    @pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied.
+    
+    @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
+    
+    @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
+    


diff -r 40c7458fb0c8dc5d0bf4e90e36133716fe0afe33 -r 7535fc2cb387702e00924bad69681072a270f48a doc/mark.txt
--- a/doc/mark.txt
+++ b/doc/mark.txt
@@ -6,37 +6,71 @@
 
 .. currentmodule:: _pytest.mark
 
-By using the ``pytest.mark`` helper you can instantiate
-decorators that will set named metadata on test functions.
+By using the ``pytest.mark`` helper you can easily set
+metadata on your test functions. To begin with, there are
+some builtin markers, for example:
 
-Marking a single function
+* skipif - skip a test function if a certain condition is met
+* xfail - produce an "expected failure" outcome if a certain
+  condition is met
+
+It's also easy to create custom markers or to apply markers
+to whole test classes or modules.
+
+marking test functions and selecting them for a run
 ----------------------------------------------------
 
-You can "mark" a test function with metadata like this::
+You can "mark" a test function with custom metadata like this::
+
+    # content of test_server.py
 
     import pytest
     @pytest.mark.webtest
     def test_send_http():
-        ...
+        pass # perform some webtest test for your app
 
-This will set the function attribute ``webtest`` to a :py:class:`MarkInfo`
-instance.  You can also specify parametrized metadata like this::
+.. versionadded:: 2.2
 
-    # content of test_mark.py
+You can restrict a test run only tests marked with ``webtest`` like this::
 
-    import pytest
-    @pytest.mark.webtest(firefox=30)
-    def test_receive():
-        pass
+    $ py.test -m webtest
 
-    @pytest.mark.webtest("functional", firefox=30)
-    def test_run_and_look():
-        pass
+Or the inverse, running all tests except the webtest ones::
+    
+    $ py.test -m "not webtest"
 
-and access it from other places like this::
+Registering markers
+-------------------------------------
 
-    test_receive.webtest.kwargs['firefox'] == 30
-    test_run_and_look.webtest.args[0] == "functional"
+.. versionadded:: 2.2
+
+.. ini-syntax for custom markers:
+
+Registering markers for your test suite is simple::
+
+    # content of pytest.ini
+    [pytest]
+    markers = 
+        webtest: mark a test as a webtest. 
+
+You can ask which markers exist for your test suite::
+
+    $ py.test --markers
+
+For an example on how to add and work with markers from a plugin, see 
+:ref:`adding a custom marker from a plugin`.
+
+.. note::
+
+    It is recommended to explicitly register markers so that:
+
+    * there is one place in your test suite defining your markers
+
+    * asking for existing markers via ``py.test --markers`` gives good output
+
+    * typos in function markers can be treated as an error if you use
+      the :ref:`--strict` option.  Later versions of py.test might treat
+      non-registered markers as an error by default.
 
 .. _`scoped-marking`:
 
@@ -58,7 +92,7 @@
 This is equivalent to directly applying the decorator to the
 two test functions.
 
-To remain compatible with Python2.5 you can also set a
+To remain backward-compatible with Python2.4 you can also set a
 ``pytestmark`` attribute on a TestClass like this::
 
     import pytest


diff -r 40c7458fb0c8dc5d0bf4e90e36133716fe0afe33 -r 7535fc2cb387702e00924bad69681072a270f48a doc/plugins.txt
--- a/doc/plugins.txt
+++ b/doc/plugins.txt
@@ -327,7 +327,6 @@
 
 .. autofunction: pytest_runtest_logreport
 
-
 Reference of important objects involved in hooks
 ===========================================================
 



https://bitbucket.org/hpk42/pytest/changeset/37de67a8bea0/
changeset:   37de67a8bea0
user:        hpk42
date:        2011-11-11 23:56:08
summary:     add a method to the config object to dynamically add a value to an (line-type) ini-value
affected #:  2 files

diff -r 7535fc2cb387702e00924bad69681072a270f48a -r 37de67a8bea01ff1de584d6d2bd813b53d412817 _pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -83,6 +83,7 @@
         self._inidict[name] = (help, type, default)
         self._ininames.append(name)
 
+
 class OptionGroup:
     def __init__(self, name, description="", parser=None):
         self.name = name
@@ -346,6 +347,14 @@
             args.append(py.std.os.getcwd())
         self.args = args
 
+    def addinivalue_line(self, name, line):
+        """ add a line to an ini-file option. The option must have been
+        declared but might not yet be set in which case the line becomes the
+        the first line in its value. """
+        x = self.getini(name)
+        assert isinstance(x, list)
+        x.append(line) # modifies the cached list inline
+
     def getini(self, name):
         """ return configuration value from an ini file. If the
         specified name hasn't been registered through a prior ``parse.addini``


diff -r 7535fc2cb387702e00924bad69681072a270f48a -r 37de67a8bea01ff1de584d6d2bd813b53d412817 testing/test_config.py
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -208,6 +208,40 @@
         l = config.getini("a2")
         assert l == []
 
+    def test_addinivalue_line_existing(self, testdir):
+        testdir.makeconftest("""
+            def pytest_addoption(parser):
+                parser.addini("xy", "", type="linelist")
+        """)
+        p = testdir.makeini("""
+            [pytest]
+            xy= 123
+        """)
+        config = testdir.parseconfig()
+        l = config.getini("xy")
+        assert len(l) == 1
+        assert l == ["123"]
+        config.addinivalue_line("xy", "456")
+        l = config.getini("xy")
+        assert len(l) == 2
+        assert l == ["123", "456"]
+
+    def test_addinivalue_line_new(self, testdir):
+        testdir.makeconftest("""
+            def pytest_addoption(parser):
+                parser.addini("xy", "", type="linelist")
+        """)
+        config = testdir.parseconfig()
+        assert not config.getini("xy")
+        config.addinivalue_line("xy", "456")
+        l = config.getini("xy")
+        assert len(l) == 1
+        assert l == ["456"]
+        config.addinivalue_line("xy", "123")
+        l = config.getini("xy")
+        assert len(l) == 2
+        assert l == ["456", "123"]
+
 def test_options_on_small_file_do_not_blow_up(testdir):
     def runfiletest(opts):
         reprec = testdir.inline_run(*opts)



https://bitbucket.org/hpk42/pytest/changeset/96a571acab65/
changeset:   96a571acab65
user:        hpk42
date:        2011-11-11 23:56:11
summary:     add ini-file "markers" option and a cmdline option "--markers" to show defined markers.  Add "skipif", "xfail" etc. to the set of builtin markers shown with the --markers option.
affected #:  12 files

diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,8 +1,18 @@
 Changes between 2.1.3 and XXX 2.2.0
 ----------------------------------------
 
-- new feature to help optimizing your tests: --durations=N option for 
-  displaying N slowest test calls and setup/teardown methods.
+- introduce registration for "pytest.mark.*" helpers via ini-files
+  or through plugin hooks.  Also introduce a "--strict" option which 
+  will treat unregistered markers as errors
+  allowing to avoid typos and maintain a well described set of markers
  for your test suite.  See examples at http://pytest.org/latest/mark.html
+  and its links.
+- XXX introduce "-m marker" option to select tests based on markers
+  (this is a stricter more predictable version of '-k' which also matches
+  substrings and compares against the test function name etc.)
+- new feature to help optimizing the speed of your tests: 
+  --durations=N option for displaying N slowest test calls 
+  and setup/teardown methods.
 - fix and cleanup pytest's own test suite to not leak FDs 
 - fix issue83: link to generated funcarg list
 - fix issue74: pyarg module names are now checked against imp.find_module false positives


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c _pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.2.0.dev5'
+__version__ = '2.2.0.dev6'


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c _pytest/core.py
--- a/_pytest/core.py
+++ b/_pytest/core.py
@@ -211,6 +211,14 @@
             self.register(mod, modname)
             self.consider_module(mod)
 
+    def pytest_configure(self, config):
+        config.addinivalue_line("markers",
+            "tryfirst: mark a hook implementation function such that the "
+            "plugin machinery will try to call it first/as early as possible.")
+        config.addinivalue_line("markers",
+            "trylast: mark a hook implementation function such that the "
+            "plugin machinery will try to call it last/as late as possible.")
+
     def pytest_plugin_registered(self, plugin):
         import pytest
         dic = self.call_plugin(plugin, "pytest_namespace", {}) or {}


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c _pytest/main.py
--- a/_pytest/main.py
+++ b/_pytest/main.py
@@ -29,6 +29,9 @@
                action="store", type="int", dest="maxfail", default=0,
                help="exit after first num failures or errors.")
 
+    group._addoption('--strict', action="store_true",
+               help="run pytest in strict mode, warnings become errors.")
+
     group = parser.getgroup("collect", "collection")
     group.addoption('--collectonly',
         action="store_true", dest="collectonly",


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c _pytest/mark.py
--- a/_pytest/mark.py
+++ b/_pytest/mark.py
@@ -14,6 +14,24 @@
              "Terminate expression with ':' to make the first match match "
              "all subsequent tests (usually file-order). ")
 
+    group.addoption("--markers", action="store_true", help=
+        "show markers (builtin, plugin and per-project ones).")
+
+    parser.addini("markers", "markers for test functions", 'linelist')
+
+def pytest_cmdline_main(config):
+    if config.option.markers:
+        config.pluginmanager.do_configure(config)
+        tw = py.io.TerminalWriter()
+        for line in config.getini("markers"):
+            name, rest = line.split(":", 1)
+            tw.write("@pytest.mark.%s:" %  name, bold=True)
+            tw.line(rest)
+            tw.line()
+        config.pluginmanager.do_unconfigure(config)
+        return 0
+pytest_cmdline_main.tryfirst = True
+
 def pytest_collection_modifyitems(items, config):
     keywordexpr = config.option.keyword
     if not keywordexpr:
@@ -37,13 +55,17 @@
         config.hook.pytest_deselected(items=deselected)
         items[:] = remaining
 
+def pytest_configure(config):
+    if config.option.strict:
+        pytest.mark._config = config
+
 def skipbykeyword(colitem, keywordexpr):
     """ return True if they given keyword expression means to
         skip this collector/item.
     """
     if not keywordexpr:
         return
-    
+
     itemkeywords = getkeywords(colitem)
     for key in filter(None, keywordexpr.split()):
         eor = key[:1] == '-'
@@ -77,15 +99,31 @@
          @py.test.mark.slowtest
          def test_function():
             pass
-  
+
     will set a 'slowtest' :class:`MarkInfo` object
     on the ``test_function`` object. """
 
     def __getattr__(self, name):
         if name[0] == "_":
             raise AttributeError(name)
+        if hasattr(self, '_config'):
+            self._check(name)
         return MarkDecorator(name)
 
+    def _check(self, name):
+        try:
+            if name in self._markers:
+                return
+        except AttributeError:
+            pass
+        self._markers = l = set()
+        for line in self._config.getini("markers"):
+            beginning = line.split(":", 1)
+            x = beginning[0].split("(", 1)[0]
+            l.add(x)
+        if name not in self._markers:
+            raise AttributeError("%r not a registered marker" % (name,))
+
 class MarkDecorator:
     """ A decorator for test functions and test classes.  When applied
     it will create :class:`MarkInfo` objects which may be


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c _pytest/skipping.py
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -9,6 +9,21 @@
            action="store_true", dest="runxfail", default=False,
            help="run tests even if they are marked xfail")
 
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "skipif(*conditions): skip the given test function if evaluation "
+        "of all conditions has a True value.  Evaluation happens within the "
+        "module global context. Example: skipif('sys.platform == \"win32\"') "
+        "skips the test if we are on the win32 platform. "
+    )
+    config.addinivalue_line("markers",
+        "xfail(*conditions, reason=None, run=True): mark the the test function "
+        "as an expected failure. Optionally specify a reason and run=False "
+        "if you don't even want to execute the test function. Any positional "
+        "condition strings will be evaluated (like with skipif) and if one is "
+        "False the marker will not be applied."
+    )
+
 def pytest_namespace():
     return dict(xfail=xfail)
 


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c doc/mark.txt
--- a/doc/mark.txt
+++ b/doc/mark.txt
@@ -68,9 +68,9 @@
 
     * asking for existing markers via ``py.test --markers`` gives good output
 
-    * typos in function markers can be treated as an error if you use
-      the :ref:`--strict` option.  Later versions of py.test might treat
-      non-registered markers as an error by default.
+    * typos in function markers are treated as an error if you use
+      the ``--strict`` option. Later versions of py.test are probably
+      going to treat non-registered markers as an error.
 
 .. _`scoped-marking`:
 


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
         name='pytest',
         description='py.test: simple powerful testing with Python',
         long_description = long_description,
-        version='2.2.0.dev5',
+        version='2.2.0.dev6',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c testing/conftest.py
--- a/testing/conftest.py
+++ b/testing/conftest.py
@@ -12,6 +12,10 @@
            help=("run FD checks if lsof is available"))
 
 def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "multi(arg=[value1,value2, ...]): call the test function "
+        "multiple times with arg=value1, then with arg=value2, ... "
+    )
     if config.getvalue("lsof"):
         try:
             out = py.process.cmdexec("lsof -p %d" % pid)


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c testing/test_core.py
--- a/testing/test_core.py
+++ b/testing/test_core.py
@@ -644,3 +644,10 @@
         assert "1" in tags
         assert "2" in tags
         assert args == (42,)
+
+def test_default_markers(testdir):
+    result = testdir.runpytest("--markers")
+    result.stdout.fnmatch_lines([
+        "*tryfirst*first*",
+        "*trylast*last*",
+    ])


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c testing/test_mark.py
--- a/testing/test_mark.py
+++ b/testing/test_mark.py
@@ -68,7 +68,54 @@
         assert 'reason' not in g.some.kwargs
         assert g.some.kwargs['reason2'] == "456"
 
+
+def test_ini_markers(testdir):
+    testdir.makeini("""
+        [pytest]
+        markers =
+            a1: this is a webtest marker
+            a2: this is a smoke marker
+    """)
+    testdir.makepyfile("""
+        def test_markers(pytestconfig):
+            markers = pytestconfig.getini("markers")
+            print (markers)
+            assert len(markers) >= 2
+            assert markers[0].startswith("a1:")
+            assert markers[1].startswith("a2:")
+    """)
+    rec = testdir.inline_run()
+    rec.assertoutcome(passed=1)
+
+def test_markers_option(testdir):
+    testdir.makeini("""
+        [pytest]
+        markers =
+            a1: this is a webtest marker
+            a1some: another marker
+    """)
+    result = testdir.runpytest("--markers", )
+    result.stdout.fnmatch_lines([
+        "*a1*this is a webtest*",
+        "*a1some*another marker",
+    ])
+
+
+def test_strict_prohibits_unregistered_markers(testdir):
+    testdir.makepyfile("""
+        import pytest
+        @pytest.mark.unregisteredmark
+        def test_hello():
+            pass
+    """)
+    result = testdir.runpytest("--strict")
+    assert result.ret != 0
+    result.stdout.fnmatch_lines([
+        "*unregisteredmark*not*registered*",
+    ])
+
 class TestFunctional:
+
     def test_mark_per_function(self, testdir):
         p = testdir.makepyfile("""
             import pytest


diff -r 37de67a8bea01ff1de584d6d2bd813b53d412817 -r 96a571acab657356d2b2ddd81c89df610d77025c testing/test_skipping.py
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -549,3 +549,10 @@
     ])
 
 
+def test_default_markers(testdir):
+    result = testdir.runpytest("--markers")
+    result.stdout.fnmatch_lines([
+        "*skipif(*conditions)*skip*",
+        "*xfail(*conditions, reason=None, run=True)*expected failure*",
+    ])
+



https://bitbucket.org/hpk42/pytest/changeset/d8cce78bee2b/
changeset:   d8cce78bee2b
user:        hpk42
date:        2011-11-12 00:02:06
summary:     introduce a new -m mark_expression option
affected #:  9 files

diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -7,9 +7,10 @@
   allowing to avoid typos and maintain a well described set of markers
  for your test suite.  See examples at http://pytest.org/latest/mark.html
   and its links.
-- XXX introduce "-m marker" option to select tests based on markers
-  (this is a stricter more predictable version of '-k' which also matches
-  substrings and compares against the test function name etc.)
+- introduce "-m marker" option to select tests based on markers
+  (this is a stricter and more predictable version of '-k' in that
+  "-m" only matches complete markers and has more obvious rules
+  for and/or semantics.
 - new feature to help optimizing the speed of your tests: 
   --durations=N option for displaying N slowest test calls 
   and setup/teardown methods.


diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b _pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.2.0.dev6'
+__version__ = '2.2.0.dev7'


diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b _pytest/mark.py
--- a/_pytest/mark.py
+++ b/_pytest/mark.py
@@ -14,6 +14,12 @@
              "Terminate expression with ':' to make the first match match "
              "all subsequent tests (usually file-order). ")
 
+    group._addoption("-m",
+        action="store", dest="markexpr", default="", metavar="MARKEXPR",
+        help="only run tests which match given mark expression.  "
+             "An expression is a python expression which can use "
+             "marker names.")
+
     group.addoption("--markers", action="store_true", help=
         "show markers (builtin, plugin and per-project ones).")
 
@@ -34,10 +40,11 @@
 
 def pytest_collection_modifyitems(items, config):
     keywordexpr = config.option.keyword
-    if not keywordexpr:
+    matchexpr = config.option.markexpr
+    if not keywordexpr and not matchexpr:
         return
     selectuntil = False
-    if keywordexpr[-1] == ":":
+    if keywordexpr[-1:] == ":":
         selectuntil = True
         keywordexpr = keywordexpr[:-1]
 
@@ -47,14 +54,27 @@
         if keywordexpr and skipbykeyword(colitem, keywordexpr):
             deselected.append(colitem)
         else:
-            remaining.append(colitem)
             if selectuntil:
                 keywordexpr = None
+            if matchexpr:
+                if not matchmark(colitem, matchexpr):
+                    deselected.append(colitem)
+                    continue
+            remaining.append(colitem)
 
     if deselected:
         config.hook.pytest_deselected(items=deselected)
         items[:] = remaining
 
+class BoolDict:
+    def __init__(self, mydict):
+        self._mydict = mydict
+    def __getitem__(self, name):
+        return name in self._mydict
+
+def matchmark(colitem, matchexpr):
+    return eval(matchexpr, {}, BoolDict(colitem.obj.__dict__))
+
 def pytest_configure(config):
     if config.option.strict:
         pytest.mark._config = config


diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b _pytest/terminal.py
--- a/_pytest/terminal.py
+++ b/_pytest/terminal.py
@@ -440,8 +440,15 @@
 
     def summary_deselected(self):
         if 'deselected' in self.stats:
+            l = []
+            k = self.config.option.keyword
+            if k:
+                l.append("-k%s" % k)
+            m = self.config.option.markexpr
+            if m:
+                l.append("-m %r" % m)
             self.write_sep("=", "%d tests deselected by %r" %(
-                len(self.stats['deselected']), self.config.option.keyword), bold=True)
+                len(self.stats['deselected']), " ".join(l)), bold=True)
 
 def repr_pythonversion(v=None):
     if v is None:


diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b doc/Makefile
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -40,7 +40,7 @@
 	-rm -rf $(BUILDDIR)/*
 
 install: html
-	@rsync -avz _build/html/ pytest.org:/www/pytest.org/latest
+	@rsync -avz _build/html/ pytest.org:/www/pytest.org/2.2.0.dev7
 
 installpdf: latexpdf
 	@scp $(BUILDDIR)/latex/pytest.pdf pytest.org:/www/pytest.org/latest


diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b doc/index.txt
--- a/doc/index.txt
+++ b/doc/index.txt
@@ -26,8 +26,8 @@
 - **supports functional testing and complex test setups**
 
  - (new in 2.2) :ref:`durations`
+ - (much improved in 2.2) :ref:`marking and test selection <mark>`
  - advanced :ref:`skip and xfail`
- - generic :ref:`marking and test selection <mark>`
  - can :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`
  - can :ref:`continuously re-run failing tests <looponfailing>`
  - many :ref:`builtin helpers <pytest helpers>`


diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b doc/mark.txt
--- a/doc/mark.txt
+++ b/doc/mark.txt
@@ -28,16 +28,34 @@
     @pytest.mark.webtest
     def test_send_http():
         pass # perform some webtest test for your app
+    def test_something_quick():
+        pass
 
 .. versionadded:: 2.2
 
-You can restrict a test run only tests marked with ``webtest`` like this::
+You can then restrict a test run to only run tests marked with ``webtest``::
 
-    $ py.test -m webtest
+    $ py.test -v -m webtest
+    ============================= test session starts ==============================
+    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 -- /Users/hpk/venv/0/bin/python
+    collecting ... collected 2 items
+    
+    test_server.py:3: test_send_http PASSED
+    
+    ===================== 1 tests deselected by "-m 'webtest'" =====================
+    ==================== 1 passed, 1 deselected in 0.01 seconds ====================
 
 Or the inverse, running all tests except the webtest ones::
     
-    $ py.test -m "not webtest"
+    $ py.test -v -m "not webtest"
+    ============================= test session starts ==============================
+    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6 -- /Users/hpk/venv/0/bin/python
+    collecting ... collected 2 items
+    
+    test_server.py:6: test_something_quick PASSED
+    
+    =================== 1 tests deselected by "-m 'not webtest'" ===================
+    ==================== 1 passed, 1 deselected in 0.01 seconds ====================
 
 Registering markers
 -------------------------------------
@@ -53,9 +71,19 @@
     markers = 
         webtest: mark a test as a webtest. 
 
-You can ask which markers exist for your test suite::
+You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` markers::
 
     $ py.test --markers
+    @pytest.mark.webtest: mark a test as a webtest.
+    
+    @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value.  Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. 
+    
+    @pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied.
+    
+    @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
+    
+    @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
+    
 
 For an example on how to add and work markers from a plugin, see 
 :ref:`adding a custom marker from a plugin`.
@@ -118,39 +146,42 @@
 Using ``-k TEXT`` to select tests
 ----------------------------------------------------
 
-You can use the ``-k`` command line option to select tests::
+You can use the ``-k`` command line option to only run tests with names that match the given argument::
 
-    $ py.test -k webtest  # running with the above defined examples yields
-    =========================== test session starts ============================
-    platform darwin -- Python 2.7.1 -- pytest-2.1.3
+    $ py.test -k send_http  # running with the above defined examples
+    ============================= test session starts ==============================
+    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6
     collecting ... collected 4 items
     
-    test_mark.py ..
-    test_mark_classlevel.py ..
+    test_server.py .
     
-    ========================= 4 passed in 0.03 seconds =========================
+    ===================== 3 tests deselected by '-ksend_http' ======================
+    ==================== 1 passed, 3 deselected in 0.02 seconds ====================
 
 And you can also run all tests except the ones that match the keyword::
 
-    $ py.test -k-webtest
-    =========================== test session starts ============================
-    platform darwin -- Python 2.7.1 -- pytest-2.1.3
+    $ py.test -k-send_http
+    ============================= test session starts ==============================
+    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6
     collecting ... collected 4 items
     
-    ===================== 4 tests deselected by '-webtest' =====================
-    ======================= 4 deselected in 0.02 seconds =======================
+    test_mark_classlevel.py ..
+    test_server.py .
+    
+    ===================== 1 tests deselected by '-k-send_http' =====================
+    ==================== 3 passed, 1 deselected in 0.03 seconds ====================
 
 Or to only select the class::
 
     $ py.test -kTestClass
-    =========================== test session starts ============================
-    platform darwin -- Python 2.7.1 -- pytest-2.1.3
+    ============================= test session starts ==============================
+    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev6
     collecting ... collected 4 items
     
     test_mark_classlevel.py ..
     
-    ==================== 2 tests deselected by 'TestClass' =====================
-    ================== 2 passed, 2 deselected in 0.02 seconds ==================
+    ===================== 2 tests deselected by '-kTestClass' ======================
+    ==================== 2 passed, 2 deselected in 0.02 seconds ====================
 
 API reference for mark related objects
 ------------------------------------------------


diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
         name='pytest',
         description='py.test: simple powerful testing with Python',
         long_description = long_description,
-        version='2.2.0.dev6',
+        version='2.2.0.dev7',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],


diff -r 96a571acab657356d2b2ddd81c89df610d77025c -r d8cce78bee2bc2b26de9a2890ec20239cc425d5b testing/test_mark.py
--- a/testing/test_mark.py
+++ b/testing/test_mark.py
@@ -114,6 +114,30 @@
         "*unregisteredmark*not*registered*",
     ])
 
+ at pytest.mark.multi(spec=[
+        ("xyz", ("test_one",)),
+        ("xyz and xyz2", ()),
+        ("xyz2", ("test_two",)),
+        ("xyz or xyz2", ("test_one", "test_two"),)
+])
+def test_mark_option(spec, testdir):
+    testdir.makepyfile("""
+        import pytest
+        @pytest.mark.xyz
+        def test_one():
+            pass
+        @pytest.mark.xyz2
+        def test_two():
+            pass
+    """)
+    opt, passed_result = spec
+    rec = testdir.inline_run("-m", opt)
+    passed, skipped, fail = rec.listoutcomes()
+    passed = [x.nodeid.split("::")[-1] for x in passed]
+    assert len(passed) == len(passed_result)
+    assert list(passed) == list(passed_result)
+
+
 class TestFunctional:
 
     def test_mark_per_function(self, testdir):

Repository URL: https://bitbucket.org/hpk42/pytest/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the pytest-commit mailing list