[pypy-svn] r59667 - in pypy/build/benchmem: . benchmark testing

xoraxax at codespeak.net
Sun Nov 2 22:20:43 CET 2008


Author: xoraxax
Date: Sun Nov  2 22:20:43 2008
New Revision: 59667

Added:
   pypy/build/benchmem/benchmark/gcbench.py   (contents, props changed)
      - copied, changed from r59654, pypy/trunk/pypy/translator/goal/gcbench.py
Modified:
   pypy/build/benchmem/benchmark/appprofiles.py
   pypy/build/benchmem/report_graphic.py
   pypy/build/benchmem/runbench.py
   pypy/build/benchmem/testing/test_benchtool.py
Log:
Add gcbench to the appprofiles and derive the running duration logarithmically from numiter. Also fix the pauses unittest, whose result naming was slightly wrong.

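The new bench_gcbench (appprofiles.py hunk below) scales the gcbench tree depths with
log10 of the iteration count instead of linearly. A quick standalone sketch of the same
arithmetic (illustrative only, not part of the commit; depths_for is a made-up helper name):

    import math

    def depths_for(numiter):
        # Same derivation as bench_gcbench: depth grows with log10(numiter),
        # so the benchmark's running time rises slowly as numiter grows.
        base_depth = int(math.log(numiter, 10) * 3)
        # returns (kLongLivedTreeDepth, kStretchTreeDepth)
        return base_depth, base_depth + 2

    # e.g. numiter around 10**5 gives a base depth of about 15
    # and a stretch depth of about 17.
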
Modified: pypy/build/benchmem/benchmark/appprofiles.py
==============================================================================
--- pypy/build/benchmem/benchmark/appprofiles.py	(original)
+++ pypy/build/benchmem/benchmark/appprofiles.py	Sun Nov  2 22:20:43 2008
@@ -1,3 +1,7 @@
+import math
+
+import gcbench
+
 
 class newstyle(object):
     def __init__(self, i, next):
@@ -64,3 +68,11 @@
         next = None
         for i in range(iter1):
             next = B(next)
+
+
+def bench_gcbench(iter1, iter2):
+    base_depth = int(math.log(iter1, 10) * 3)
+    gcbench.kStretchTreeDepth = base_depth + 2
+    gcbench.kLongLivedTreeDepth = base_depth
+    gcbench.main()
+

Copied: pypy/build/benchmem/benchmark/gcbench.py (from r59654, pypy/trunk/pypy/translator/goal/gcbench.py)
==============================================================================
--- pypy/trunk/pypy/translator/goal/gcbench.py	(original)
+++ pypy/build/benchmem/benchmark/gcbench.py	Sun Nov  2 22:20:43 2008
@@ -93,20 +93,17 @@
 
 def time_construction(depth):
     niters = num_iters(depth)
-    print "Creating %d trees of depth %d" % (niters, depth)
     t_start = time.time()
     for i in range(niters):
         temp_tree = Node()
         populate(depth, temp_tree)
         temp_tree = None
     t_finish = time.time()
-    print "\tTop down constrution took %f ms" % ((t_finish-t_start)*1000.)
     t_start = time.time()
     for i in range(niters):
         temp_tree = make_tree(depth)
         temp_tree = None
     t_finish = time.time()
-    print "\tBottom up constrution took %f ms" % ((t_finish-t_start)*1000.)
 
 DEFAULT_DEPTHS = range(kMinTreeDepth, kMaxTreeDepth+1, 2)
 
@@ -117,36 +114,28 @@
 def time_parallel_constructions(depths, nthreads):
     import threading
     threadlist = []
-    print "Starting %d parallel threads..." % (nthreads,)
     for n in range(nthreads):
         t = threading.Thread(target=time_constructions, args=(depths,))
         t.start()
         threadlist.append(t)
     for t in threadlist:
         t.join()
-    print "All %d threads finished" % (nthreads,)
 
 def main(depths=DEFAULT_DEPTHS, threads=0):
-    print "Garbage Collector Test"
-    print " Stretching memory with a binary tree of depth %d" % kStretchTreeDepth
-    print_diagnostics()
     t_start = time.time()
     temp_tree = make_tree(kStretchTreeDepth)
     temp_tree = None
 
     # Create a long lived object
-    print " Creating a long-lived binary tree of depth %d" % kLongLivedTreeDepth
     long_lived_tree = Node()
     populate(kLongLivedTreeDepth, long_lived_tree)
 
     # Create long-lived array, filling half of it
-    print " Creating a long-lived array of %d doubles" % kArraySize
     array = [0.0] * kArraySize
     i = 1
     while i < kArraySize/2:
         array[i] = 1.0/i
         i += 1
-    print_diagnostics()
 
     if threads:
         time_parallel_constructions(depths, threads)
@@ -157,8 +146,6 @@
         raise Failed
 
     t_finish = time.time()
-    print_diagnostics()
-    print "Completed in %f ms." % ((t_finish-t_start)*1000.)
 
 class Failed(Exception):
     pass

Modified: pypy/build/benchmem/report_graphic.py
==============================================================================
--- pypy/build/benchmem/report_graphic.py	(original)
+++ pypy/build/benchmem/report_graphic.py	Sun Nov  2 22:20:43 2008
@@ -112,7 +112,7 @@
                 pylab.title(name)
                 plots.append(pylab.plot(x, y))
 
-            pylab.legend(plots, [result.executable for result in results])
+            pylab.legend(plots, [result.executable_short for result in results])
             xlabel = ["wall clock time (%)", "wall clock time (s)"][SHOW_TS]
             pylab.xlabel(xlabel)
             ylabel = ["", "incremental "][bool(basesize)] + "private memory consumption (kB)"

Modified: pypy/build/benchmem/runbench.py
==============================================================================
--- pypy/build/benchmem/runbench.py	(original)
+++ pypy/build/benchmem/runbench.py	Sun Nov  2 22:20:43 2008
@@ -31,6 +31,13 @@
         self.options = options
         self.tmpdir = py.path.local.make_numbered_dir(prefix="bench")
 
+    def getbenchsource(self):
+        preamble = """
+            import sys
+            sys.path.append(%r)
+        """ % (str(benchmarkdir), )
+        return py.code.Source(preamble, self.benchpath.read())
+
     def getnames(self):
         l = []
         for name, obj in vars(self.benchpath.pyimport()).items():
@@ -218,7 +225,7 @@
 
     def makebench(self, name):
         arglist = (int(self.options.numiter), self.ITER2)
-        source = py.code.Source(self.benchpath.read(), """
+        source = py.code.Source(self.getbenchsource(), """
             import gc
             def write(c):
                 sys.stdout.write(c)
@@ -258,9 +265,6 @@
         for name in self.getnames():
             self.run_once(name)
 
-    def write_header(self):
-        self.write_benchheader('pauses', self.args)
-
     def write_footer(self):
         self.logstream.flush()
 
@@ -271,10 +275,10 @@
         self.last_t = t
 
     def run_once(self, name):
-        self.write_header()
+        self.write_benchheader(name, self.args)
         from tracer import trace_in_another_process
         trace_in_another_process(self.tmpdir.join(self.benchpath.basename),
-                                 self.benchpath.read(),
+                                 self.getbenchsource(),
                                  name, self.measure, self.executable,
                                  self.args)
         self.write_footer()
@@ -315,6 +319,12 @@
             l.append(result)
         return name2results.items()
 
+    def getexecutables(self, basename_only=False):
+        names = [result.executable for result in self.results]
+        if basename_only:
+            names = [name.rsplit(os.path.sep, 1)[1] for name in names]
+        return names
+
     def filter(self, benchtype):
         l = []
         for result in self.results:
@@ -342,6 +352,7 @@
             yield parse_result(stream_iter, kw)
 
 class Result(object):
+
     @classmethod
     def parse(cls, lnstream, kw):
         snapshots = []
@@ -372,6 +383,11 @@
         self.benchname = benchname
         self.benchargs = benchargs
 
+    @property
+    def executable_short(self):
+        return self.executable.rsplit(os.path.sep)[-1]
+
+
 class ObjsizeResult(CommonResult):
     benchtype = "objsizes"
 

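The getbenchsource helper added to runbench.py above prepends a small sys.path preamble so
that the generated benchmark script can import helper modules such as gcbench from the
benchmark directory. A rough approximation of the idea in plain Python (illustrative only;
benchmarkdir here is a hypothetical path and py.code.Source is replaced by simple string
handling):

    import textwrap

    benchmarkdir = "/path/to/benchmem/benchmark"  # hypothetical location

    def getbenchsource(bench_source_text):
        # Prepend a sys.path entry so the combined script can import
        # modules like gcbench that live next to the benchmark files.
        preamble = textwrap.dedent("""
            import sys
            sys.path.append(%r)
        """ % (benchmarkdir,))
        return preamble + textwrap.dedent(bench_source_text)
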
Modified: pypy/build/benchmem/testing/test_benchtool.py
==============================================================================
--- pypy/build/benchmem/testing/test_benchtool.py	(original)
+++ pypy/build/benchmem/testing/test_benchtool.py	Sun Nov  2 22:20:43 2008
@@ -91,7 +91,7 @@
     resultset = runbench.ResultSet()
     resultset.parse(benchlog)
     for name, results in resultset.getname2results():
-        assert len(results) == 4
+        assert len(results) == 1
         assert len(results[0].lst)
 
 def test_runbench_functional():


