[pypy-svn] r58280 - in pypy/build/benchmem: . testing

hpk at codespeak.net hpk at codespeak.net
Sat Sep 20 19:24:01 CEST 2008


Author: hpk
Date: Sat Sep 20 19:23:58 2008
New Revision: 58280

Added:
   pypy/build/benchmem/benchreport.py
Modified:
   pypy/build/benchmem/benchtool.py
   pypy/build/benchmem/testing/test_benchtool.py
Log:
tweaking things a bit so one can run e.g. 

    benchtool.py --executable=python2.5 
    benchtool.py --executable=pypy-c

which logs to a single log file (bench.log by default) 
and then benchreport.py (reading from bench.log) 
reports maximum dirty numbers. 

that's rather nonsensical output for now,
but I am off for a bit and wanted to
check in anyway.



Added: pypy/build/benchmem/benchreport.py
==============================================================================
--- (empty file)
+++ pypy/build/benchmem/benchreport.py	Sat Sep 20 19:23:58 2008
@@ -0,0 +1,18 @@
+import py
+import smaps, benchtool
+
+if __name__ == "__main__":
+    benchlog = py.path.local("bench.log")
+    reader = benchtool.LogReader()
+    reader.parse_logfile(benchlog)
+
+    tw = py.io.TerminalWriter()
+
+    for name, results in reader.name2results.items():
+        tw.sep("=", name)
+        for result in results:
+            print "%-30s max dirty = %s + %s" % (
+                result.executable, 
+                result.max("shared_dirty"), 
+                result.max("private_dirty")
+            )

Modified: pypy/build/benchmem/benchtool.py
==============================================================================
--- pypy/build/benchmem/benchtool.py	(original)
+++ pypy/build/benchmem/benchtool.py	Sat Sep 20 19:23:58 2008
@@ -95,14 +95,14 @@
 # ================ reading a benchmark log file =======================
 #
 
-class Benchresults(object):
+class LogReader(object):
     def __init__(self):
         self.name2results = {}
 
     def parse_logfile(self, logpath):
         f = logpath.open()
         for result in BenchmarkResult.parse(f):
-            #print "parsed", result
+            print "parsed", result
             l = self.name2results.setdefault(result.benchname, [])
             l.append(result)
         f.close()
@@ -115,6 +115,12 @@
         self.benchname = benchname
         self.benchargs = benchargs
 
+    def max(self, attrname):
+        maxvalue = 0
+        for snap in self.snapshots:
+            maxvalue = max(maxvalue, getattr(snap, attrname))
+        return maxvalue
+
     @classmethod
     def parse(cls, f):
         while not f.closed:
@@ -138,8 +144,9 @@
                     #print "reading", line.strip()
                 snapshots.append(Snapshot(mappings))
                 line = f.readline()
-                if not line or line == BenchRunner.SEPBENCH:
-                    break 
+                #print "reading", line.strip()
+                if not line or line.startswith(BenchRunner.SEPBENCH):
+                    break
             yield BenchmarkResult(snapshots, executable=executable, 
                 benchname=benchname, benchargs=benchargs)
 
@@ -206,5 +213,5 @@
    
     runner = BenchRunner(executable, benchlog)
     for name in names:
-        runner.run_checkpointed_bench(name, (100, 100))
+        runner.run_checkpointed_bench(name, (100, 1000))
     print "bench results append to -->>>", benchlog

Modified: pypy/build/benchmem/testing/test_benchtool.py
==============================================================================
--- pypy/build/benchmem/testing/test_benchtool.py	(original)
+++ pypy/build/benchmem/testing/test_benchtool.py	Sat Sep 20 19:23:58 2008
@@ -31,7 +31,7 @@
         runner = benchtool.BenchRunner(executable="python2.5", benchlog=log)
         runner.run_checkpointed_bench(path, args)
         assert log.check()
-        benchresult = benchtool.Benchresults()
+        benchresult = benchtool.LogReader()
         benchresult.parse_logfile(log)
         #assert reader.executable
         #assert reader.executable



More information about the Pypy-commit mailing list