[py-svn] commit/pytest: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Nov 8 18:54:58 CET 2011


2 new commits in pytest:


https://bitbucket.org/hpk42/pytest/changeset/c6cba3e09301/
changeset:   c6cba3e09301
user:        hpk42
date:        2011-11-08 18:20:56
summary:     introduce --durations=N showing slowest test executions
affected #:  10 files

diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,6 +1,8 @@
 Changes between 2.1.3 and [next version]
 ----------------------------------------
 
+- new feature to help optimize your tests: --durations=N option for
+  displaying N slowest test calls
 - fix and cleanup pytest's own test suite to not leak FDs 
 - fix issue83: link to generated funcarg list
 - fix issue74: pyarg module names are now checked against imp.find_module false positives


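For orientation: besides the command line (``py.test --durations=N``, shown in the doc changes below), the option can also be passed to ``pytest.main()`` when driving py.test from Python. A minimal sketch, where "tests" is a placeholder path and not part of this changeset::

    # Hedged illustration only; "tests" is a hypothetical directory.
    import pytest

    # Report the 5 slowest test durations after the run finishes.
    exit_code = pytest.main(["--durations=5", "tests"])
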
diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 _pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.1.4.dev3'
+__version__ = '2.2.0.dev1'


diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 _pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -549,10 +549,17 @@
     def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
         return [x.report for x in self.getcalls(names)]
 
-    def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport", when=None):
+    def matchreport(self, inamepart="",
+        names="pytest_runtest_logreport pytest_collectreport", when=None):
         """ return a testreport whose dotted import path matches """
         l = []
         for rep in self.getreports(names=names):
+            try:
+                if not when and rep.when != "call" and rep.passed:
+                    # setup/teardown passing reports - let's ignore those
+                    continue
+            except AttributeError:
+                pass
             if when and getattr(rep, 'when', None) != when:
                 continue
             if not inamepart or inamepart in rep.nodeid.split("::"):


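The matchreport() change above keeps existing callers getting a single "call" report per test once passing setup/teardown reports also reach the hooks (see the second changeset). A standalone sketch of the same filter, using stand-in report objects rather than pytest's real report classes::

    class FakeReport(object):
        """Stand-in for a pytest test report (illustration only)."""
        def __init__(self, nodeid, when, passed):
            self.nodeid = nodeid
            self.when = when
            self.passed = passed

    def keep(rep, when=None):
        # Mirrors the added logic: with no phase requested, passing
        # setup/teardown reports are skipped; with an explicit phase,
        # only reports from that phase are kept.
        if not when and getattr(rep, "when", None) != "call" and rep.passed:
            return False
        if when and getattr(rep, "when", None) != when:
            return False
        return True

    reports = [FakeReport("test_x.py::test_a", "setup", True),
               FakeReport("test_x.py::test_a", "call", True),
               FakeReport("test_x.py::test_a", "teardown", True)]
    assert [r.when for r in reports if keep(r)] == ["call"]
    assert [r.when for r in reports if keep(r, when="setup")] == ["setup"]
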
diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 _pytest/runner.py
--- a/_pytest/runner.py
+++ b/_pytest/runner.py
@@ -14,6 +14,38 @@
 #
 # pytest plugin hooks
 
+def pytest_addoption(parser):
+    group = parser.getgroup("terminal reporting", "reporting", after="general")
+    group.addoption('--durations',
+         action="store", type="int", dest="durations", default=None, metavar="N",
+         help="show N slowest setup/test durations (N=0 for all)."),
+
+def pytest_terminal_summary(terminalreporter):
+    durations = terminalreporter.config.option.durations
+    if durations is None:
+        return
+    tr = terminalreporter
+    duration2rep = {}
+    for key, replist in tr.stats.items():
+        if key == "deselected":
+            continue
+        for rep in replist:
+            duration2rep[rep.duration] = rep
+    if not duration2rep:
+        return
+    d2 = duration2rep.items()
+    d2.sort()
+    d2.reverse()
+    if not durations:
+        tr.write_sep("=", "slowest test durations")
+    else:
+        tr.write_sep("=", "slowest %s test durations" % durations)
+        d2 = d2[:durations]
+
+    for duration, rep in d2:
+        nodeid = rep.nodeid.replace("::()::", "::")
+        tr.write_line("%2.2f %s %s" % (duration, rep.when, nodeid))
+
 def pytest_sessionstart(session):
     session._setupstate = SetupState()
 
@@ -185,13 +217,13 @@
         #: a name -> value dictionary containing all keywords and
         #: markers associated with a test invocation.
         self.keywords = keywords
-        
+
         #: test outcome, always one of "passed", "failed", "skipped".
         self.outcome = outcome
 
         #: None or a failure representation.
         self.longrepr = longrepr
-        
+
         #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
         self.when = when
 


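The pytest_terminal_summary hook added above boils down to: collect a duration per recorded report, sort descending, truncate to N. A hedged standalone sketch of that selection step, using stand-in report objects (the real hook reads them from terminalreporter.stats and also formats the output)::

    def slowest(reports, n):
        """Return the n largest (duration, report) pairs, largest first.

        n == 0 means "all".  A list of pairs is used here so reports
        that happen to share a duration are all kept.
        """
        pairs = sorted(((rep.duration, rep) for rep in reports),
                       key=lambda pair: pair[0], reverse=True)
        return pairs[:n] if n else pairs

    class FakeReport(object):
        def __init__(self, nodeid, when, duration):
            self.nodeid, self.when, self.duration = nodeid, when, duration

    reports = [FakeReport("test_a", "call", 0.30),
               FakeReport("test_b", "call", 0.10),
               FakeReport("test_c", "call", 0.20)]
    assert [r.nodeid for _, r in slowest(reports, 2)] == ["test_a", "test_c"]
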
diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 doc/announce/release-2.2.0.txt
--- /dev/null
+++ b/doc/announce/release-2.2.0.txt
@@ -0,0 +1,21 @@
+py.test 2.2.0: new test duration profiling and bug fixes
+===========================================================================
+
+pytest-2.2.0 is a backward compatible release of the popular
+py.test testing tool.  It introduces the new "--durations=N" option
+showing the N slowest test executions or setup/teardown calls.  The
+release also contains a few fixes and some cleanup of pytest's own test
+suite allowing it to run on a wider range of environments.
+
+For general information, see extensive docs with examples here:
+
+     http://pytest.org/
+
+If you want to install or upgrade pytest you might just type::
+
+    pip install -U pytest # or
+    easy_install -U pytest
+
+best,
+holger krekel
+


diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 doc/example/simple.txt
--- a/doc/example/simple.txt
+++ b/doc/example/simple.txt
@@ -299,3 +299,30 @@
     collecting ... collected 0 items
     
     =============================  in 0.00 seconds =============================
+
+profiling test duration
+--------------------------
+
+.. regendoc:wipe
+
+.. versionadded: 2.2
+
+If you have a large, slow-running test suite you might want to find
+out which tests are slowest. Let's make an artificial test suite::
+
+    # content of test_some_are_slow.py
+   
+    import time 
+
+    def test_funcfast():
+        pass
+    
+    def test_funcslow1():
+        time.sleep(0.1)
+        
+    def test_funcslow2():
+        time.sleep(0.2)
+   
+Now we can profile which test functions execute slowest::
+
+    $ py.test --durations=3


diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 doc/index.txt
--- a/doc/index.txt
+++ b/doc/index.txt
@@ -25,6 +25,7 @@
 
 - **supports functional testing and complex test setups**
 
+ - (new in 2.2) :ref:`durations`
  - advanced :ref:`skip and xfail`
  - generic :ref:`marking and test selection <mark>`
  - can :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`


diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 doc/usage.txt
--- a/doc/usage.txt
+++ b/doc/usage.txt
@@ -98,6 +98,18 @@
 In previous versions you could only enter PDB tracing if
 you disable capturing on the command line via ``py.test -s``.
 
+.. _durations:
+
+Profiling test execution duration 
+-------------------------------------
+
+.. versionadded: 2.2
+
+To get a list of the slowest 10 test durations::
+
+    py.test --durations=10
+
+
 Creating JUnitXML format files
 ----------------------------------------------------
 


diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
         name='pytest',
         description='py.test: simple powerful testing with Python',
         long_description = long_description,
-        version='2.1.4.dev3',
+        version='2.2.0.dev1',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],


diff -r e29e0af293e6fa3cfe09b5184c8ff8d7e03dc3fe -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 testing/acceptance_test.py
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -465,3 +465,48 @@
             "*1 failed*",
         ])
 
+def test_duration_test(testdir):
+    testdir.makepyfile("""
+        import time
+        frag = 0.01
+        def test_2():
+            time.sleep(frag*2)
+        def test_1():
+            time.sleep(frag)
+        def test_3():
+            time.sleep(frag*3)
+    """)
+    result = testdir.runpytest("--durations=10")
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
+        "*durations*",
+        "*call*test_3*",
+        "*call*test_2*",
+        "*call*test_1*",
+    ])
+    result = testdir.runpytest("--durations=2")
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
+        "*durations*",
+        "*call*test_3*",
+        "*call*test_2*",
+    ])
+    assert "test_1" not in result.stdout.str()
+    result = testdir.runpytest("--durations=0")
+    assert result.ret == 0
+    for x in "123":
+        for y in 'call',: #'setup', 'call', 'teardown':
+            l = []
+            for line in result.stdout.lines:
+                if ("test_%s" % x) in line and y in line:
+                    break
+            else:
+                raise AssertionError("not found %s %s" % (x,y))
+
+    result = testdir.runpytest("--durations=2", "-k test_1")
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
+        "*durations*",
+        "*call*test_1*",
+    ])
+



https://bitbucket.org/hpk42/pytest/changeset/70a660cea75e/
changeset:   70a660cea75e
user:        hpk42
date:        2011-11-08 18:53:46
summary:     make --durations also show the execution times of setup/teardown calls. This introduces a slight incompatibility: pytest_runtest_logreport now sees setup/teardown reports even if the tests passed.
affected #:  12 files

diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,8 +1,8 @@
-Changes between 2.1.3 and [next version]
+Changes between 2.1.3 and XXX 2.2.0
 ----------------------------------------
 
 - new feature to help optimize your tests: --durations=N option for
-  displaying N slowest test calls
+  displaying N slowest test calls and setup/teardown methods.
 - fix and cleanup pytest's own test suite to not leak FDs 
 - fix issue83: link to generated funcarg list
 - fix issue74: pyarg module names are now checked against imp.find_module false positives


diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.2.0.dev1'
+__version__ = '2.2.0.dev2'


diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/hookspec.py
--- a/_pytest/hookspec.py
+++ b/_pytest/hookspec.py
@@ -149,7 +149,8 @@
 pytest_runtest_makereport.firstresult = True
 
 def pytest_runtest_logreport(report):
-    """ process item test report. """
+    """ process a test setup/call/teardown report relating to
+    the respective phase of executing a test. """
 
 # special handling for final teardown - somewhat internal for now
 def pytest__teardown_final(session):


diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/junitxml.py
--- a/_pytest/junitxml.py
+++ b/_pytest/junitxml.py
@@ -166,7 +166,8 @@
 
     def pytest_runtest_logreport(self, report):
         if report.passed:
-            self.append_pass(report)
+            if report.when == "call": # ignore setup/teardown
+                self.append_pass(report)
         elif report.failed:
             if report.when != "call":
                 self.append_error(report)


diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -334,8 +334,8 @@
         l = list(args) + [p]
         reprec = self.inline_run(*l)
         reports = reprec.getreports("pytest_runtest_logreport")
-        assert len(reports) == 1, reports
-        return reports[0]
+        assert len(reports) == 3, reports # setup/call/teardown
+        return reports[1]
 
     def inline_genitems(self, *args):
         return self.inprocess_run(list(args) + ['--collectonly'])


diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/resultlog.py
--- a/_pytest/resultlog.py
+++ b/_pytest/resultlog.py
@@ -63,6 +63,8 @@
         self.write_log_entry(testpath, lettercode, longrepr)
 
     def pytest_runtest_logreport(self, report):
+        if report.when != "call" and report.passed:
+            return
         res = self.config.hook.pytest_report_teststatus(report=report)
         code = res[1]
         if code == 'x':


diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/runner.py
--- a/_pytest/runner.py
+++ b/_pytest/runner.py
@@ -26,25 +26,36 @@
         return
     tr = terminalreporter
     duration2rep = {}
+    alldurations = 0.0
     for key, replist in tr.stats.items():
         if key == "deselected":
             continue
         for rep in replist:
             duration2rep[rep.duration] = rep
+            alldurations += rep.duration
     if not duration2rep:
         return
-    d2 = duration2rep.items()
+    d2 = remaining = duration2rep.items()
     d2.sort()
     d2.reverse()
     if not durations:
         tr.write_sep("=", "slowest test durations")
     else:
         tr.write_sep("=", "slowest %s test durations" % durations)
+        remaining = d2[durations:]
         d2 = d2[:durations]
 
+    assert (alldurations/100) > 0
     for duration, rep in d2:
         nodeid = rep.nodeid.replace("::()::", "::")
-        tr.write_line("%2.2f %s %s" % (duration, rep.when, nodeid))
+        percent = rep.duration / (alldurations  / 100)
+        tr.write_line("%02.2fs %-02.2f%% %s %s" %
+            (duration, percent, rep.when, nodeid))
+    if remaining:
+        remsum = sum(map(lambda x: x[0], remaining))
+        tr.write_line("%02.2fs %-02.2f%% remaining in %d test phases" %(
+            remsum, remsum / (alldurations  / 100), len(remaining)))
+
 
 def pytest_sessionstart(session):
     session._setupstate = SetupState()
@@ -110,7 +121,7 @@
     call = call_runtest_hook(item, when)
     hook = item.ihook
     report = hook.pytest_runtest_makereport(item=item, call=call)
-    if log and (when == "call" or not report.passed):
+    if log:
         hook.pytest_runtest_logreport(report=report)
     return report
 


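To make the new percentage column concrete: rep.duration / (alldurations / 100) is simply 100 * rep.duration / alldurations. A worked example with made-up durations, illustrating only the arithmetic and the format string used above::

    durations = [0.30, 0.10, 0.20, 0.05]   # hypothetical per-phase times
    alldurations = sum(durations)           # 0.65 seconds in total

    for d in durations:
        percent = d / (alldurations / 100)
        print("%02.2fs %-02.2f%%" % (d, percent))
    # -> 0.30s 46.15%, 0.10s 15.38%, 0.20s 30.77%, 0.05s 7.69%
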
diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 _pytest/terminal.py
--- a/_pytest/terminal.py
+++ b/_pytest/terminal.py
@@ -426,9 +426,10 @@
                 keys.append(key)
         parts = []
         for key in keys:
-            val = self.stats.get(key, None)
-            if val:
-                parts.append("%d %s" %(len(val), key))
+            if key: # setup/teardown reports have an empty key, ignore them
+                val = self.stats.get(key, None)
+                if val:
+                    parts.append("%d %s" %(len(val), key))
         line = ", ".join(parts)
         # XXX coloring
         msg = "%s in %.2f seconds" %(line, session_duration)


diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 doc/announce/release-2.2.0.txt
--- a/doc/announce/release-2.2.0.txt
+++ b/doc/announce/release-2.2.0.txt
@@ -1,7 +1,7 @@
 py.test 2.2.0: new test duration profiling and bug fixes
 ===========================================================================
 
-pytest-2.2.0 is a backward compatible release of the popular
+pytest-2.2.0 is a quite backward compatible release of the popular
 py.test testing tool.  It introduces the new "--durations=N" option
 showing the N slowest test executions or setup/teardown calls.  The
 release also contains a few fixes and some cleanup of pytest's own test
@@ -16,6 +16,19 @@
     pip install -U pytest # or
     easy_install -U pytest
 
+incompatible changes
+------------------------------------
+
+* You need a new version of the pytest-xdist plugin (1.7) for distributing 
+  test runs.  
+
+* Other plugins might also need an upgrade if they implement
+  the ``pytest_runtest_logreport`` hook, which is now called unconditionally
+  for the setup/teardown phases of a test. You can ignore those reports
+  by inserting "if rep.when != 'call': return". Note that most code
+  probably "just" works because the hook was already called for failing
+  setup/teardown phases of a test.
+
 best,
 holger krekel
 


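A minimal sketch of the plugin adaptation described in the announcement text above; the guard matches the one added to _pytest/resultlog.py in this changeset and restores the pre-2.2 behaviour of only seeing call reports plus failing setup/teardown reports::

    def pytest_runtest_logreport(report):
        # The hook now also fires for passing setup/teardown phases.
        # Plugins that only care about test outcomes can skip those:
        if report.when != "call" and report.passed:
            return
        # ... existing handling of call reports and setup/teardown failures ...
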
diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
         name='pytest',
         description='py.test: simple powerful testing with Python',
         long_description = long_description,
-        version='2.2.0.dev1',
+        version='2.2.0.dev2',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],


diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 testing/acceptance_test.py
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -484,12 +484,15 @@
         "*call*test_2*",
         "*call*test_1*",
     ])
+    assert "remaining in"  not in result.stdout.str()
+
     result = testdir.runpytest("--durations=2")
     assert result.ret == 0
     result.stdout.fnmatch_lines([
         "*durations*",
         "*call*test_3*",
         "*call*test_2*",
+        "*s*%*remaining in 7 test phases",
     ])
     assert "test_1" not in result.stdout.str()
     result = testdir.runpytest("--durations=0")
@@ -503,6 +506,28 @@
             else:
                 raise AssertionError("not found %s %s" % (x,y))
 
+def test_duration_test_with_fixture(testdir):
+    testdir.makepyfile("""
+        import time
+        frag = 0.01
+        def setup_function(func):
+            time.sleep(frag * 3)
+
+        def test_1():
+            time.sleep(frag*2)
+        def test_2():
+            time.sleep(frag)
+    """)
+    result = testdir.runpytest("--durations=10")
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
+        "*durations*",
+        "*setup*test_1*",
+        "*setup*test_2*",
+        "*call*test_1*",
+        "*call*test_2*",
+    ])
+
     result = testdir.runpytest("--durations=2", "-k test_1")
     assert result.ret == 0
     result.stdout.fnmatch_lines([


diff -r c6cba3e0930142e8498f4ab4d7539384f8da74c7 -r 70a660cea75ed5f9eba64cb8e2901b7bb3402829 testing/test_mark.py
--- a/testing/test_mark.py
+++ b/testing/test_mark.py
@@ -77,7 +77,7 @@
                 assert hasattr(test_hello, 'hello')
         """)
         result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines(["*passed*"])
+        result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_mark_per_module(self, testdir):
         item = testdir.getitem("""

Repository URL: https://bitbucket.org/hpk42/pytest/



