[pypy-svn] r58925 - in pypy/build/benchmem: . testing

hpk at codespeak.net
Fri Oct 10 18:57:14 CEST 2008


Author: hpk
Date: Fri Oct 10 18:57:14 2008
New Revision: 58925

Modified:
   pypy/build/benchmem/report.py
   pypy/build/benchmem/runbench.py
   pypy/build/benchmem/testing/test_benchtool.py
Log:
(xoraxax, hpk) refactor benchmark parsing and resultset handling, preparing for different types of benchmarks
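
For context, a rough sketch of how the refactored pieces are meant to fit
together (names are taken from the diffs below; the log path is made up):

    import py
    import runbench

    benchlog = py.path.local("bench.log")   # hypothetical log written by runbench.py
    resultset = runbench.ResultSet()
    resultset.parse(benchlog)

    # reports now work on a ResultSet, narrowed down by benchmark type
    sizes = resultset.filter(benchtype="sizes")
    for name, results in sizes.getname2results():
        print name, [result.executable for result in results]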


Modified: pypy/build/benchmem/report.py
==============================================================================
--- pypy/build/benchmem/report.py	(original)
+++ pypy/build/benchmem/report.py	Fri Oct 10 18:57:14 2008
@@ -31,10 +31,10 @@
     return "\n".join(lines)
 
         
-def maxtable_overview(reader):
+def maxtable_overview(resultset):
     tw = py.io.TerminalWriter()
 
-    for name, results in reader.name2results.items():
+    for name, results in resultset.getname2results():
         tw.sep("=", name)
         row0 = "executable maxprivate maxrss".split()
         rows = [row0]
@@ -46,14 +46,14 @@
         tw.line(asciitable(rows))
 
 class TableReporter:
-    def __init__(self, reader):
-        self.reader = reader 
+    def __init__(self, resultset):
+        self.resultset = resultset 
 
     begin = report = end = lambda x: None  # hooks
 
     def run(self):
         self.begin()
-        for name, results in reader.name2results.items():
+        for name, results in resultset.getname2results():
             row0 = ["num"] + [result.executable for result in results]
             numsnapshots = min([len(i.snapshots) for i in results])
 
@@ -107,25 +107,13 @@
          )
         os.system("gnuplot gnuplotcmd")
 
-class Sorter:
-    def __init__(self, name2results):
-        self.name2results = name2results
-
-    def filter(self, benchprefix=""):
-        d = {}
-        for name, results in self.name2results.iteritems():
-            if name.startswith(benchprefix):
-                d[name] = results
-        return Sorter(d)
-
-    def getsorted(self):
-        l = self.name2results.items()
-        l.sort()
-        return l
+class IncrementalSizePerBench:
+    def __init__(self, resultset):
+        self.resultset = resultset.filter(benchtype="sizes")
 
     def getexecutables(self):
         ret = None
-        for name, results in self.name2results.iteritems():
+        for name, results in self.resultset.getname2results():
             executables = [result.executable for result in results]
             if ret is None:
                 ret = executables
@@ -133,20 +121,18 @@
                 raise ValueError("resultset has incompatible list of executables"
                     "%s != %s" %(ret, executables))
         return ret 
-            
-class IncrementalSizePerBench:
-    def __init__(self, reader):
-        self.reader = reader
 
     def run(self):
         tw = py.io.TerminalWriter()
         tw.sep("=", "Incremental private RSS of size benchmarks")
-        sorter = Sorter(self.reader.name2results)
-        sorter = sorter.filter(benchprefix="sizes.")
-        executables = sorter.getexecutables()
+
+        executables = self.getexecutables()
         row0 = ["name"] + [str(x) for x in executables]
         rows = [row0]
-        for name, results in sorter.getsorted():
+        name2results = self.resultset.getname2results()
+        name2results.sort() 
+
+        for name, results in name2results:
             basesize = self.get_incremental_size(results[0])
             row = [name]
             for result in results:
@@ -167,12 +153,14 @@
         return inbench - basesize
 
 class BaseSizeOfInterpreters:
-    def __init__(self, reader):
-        self.reader = reader
+    def __init__(self, resultset):
+        self.resultset = resultset.filter(benchtype="basesize")
+
     def run(self):
+        XXX
         tw = py.io.TerminalWriter()
         tw.sep("=", "Base Size of interpreters (using sizes.bench_list_of_None)")
-        sorter = Sorter(self.reader.name2results).filter(benchprefix="sizes.")
+        sorter = Sorter(self.reader.name2results).filter(benchprefix="bench_list_of_None")
         row0 = "executable rss shared_clean shared_dirty private_clean private_dirty".split()
         rows = [row0]
         for result in sorter.name2results['sizes.bench_list_of_None']:
@@ -186,16 +174,16 @@
     options, args = parser.parse_args()
 
     benchlog = py.path.local(options.benchlog)
-    reader = runbench.LogReader()
-    reader.parse(benchlog)
+    resultset = runbench.ResultSet()
+    resultset.parse(benchlog)
 
     #maxtable_overview(reader)
-    CheckpointDetails(reader).run()
-    IncrementalSizePerBench(reader).run()
-    BaseSizeOfInterpreters(reader).run()
+    CheckpointDetails(resultset).run()
+    IncrementalSizePerBench(resultset).run()
+    #BaseSizeOfInterpreters(resultset).run()
     
     if options.gnuplot: 
-        Gnuplot(reader).run()
+        Gnuplot(resultset).run()
 
     #for name, results in reader.name2results.items():
     #    tw.sep("=", name)
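
To show the consumer side: reporters now receive a ResultSet, narrow it with
filter() and iterate over (name, results) pairs instead of going through the
removed Sorter class.  A minimal sketch along the lines of
IncrementalSizePerBench (the reporter class itself is made up for
illustration; the ResultSet API is the one added in runbench.py below):

    class MinimalSizeReport:                 # hypothetical example reporter
        def __init__(self, resultset):
            # keep only results of the "sizes" benchmark type
            self.resultset = resultset.filter(benchtype="sizes")

        def run(self):
            name2results = self.resultset.getname2results()
            name2results.sort()              # sort (benchname, results) pairs by name
            for name, results in name2results:
                for result in results:
                    print name, result.executable, len(result.snapshots)

Used the same way as the reporters in main(): MinimalSizeReport(resultset).run()
after parsing the benchlog into a ResultSet.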

Modified: pypy/build/benchmem/runbench.py
==============================================================================
--- pypy/build/benchmem/runbench.py	(original)
+++ pypy/build/benchmem/runbench.py	Fri Oct 10 18:57:14 2008
@@ -59,10 +59,12 @@
 
     def write_benchheader(self, benchname, args):
         print >>self.logstream, self.SEPBENCH 
-        print >>self.logstream, "#executable=%r" %(str(self.executable ),)
-        print >>self.logstream, "#benchpath=%r" %(self.benchpath.basename,)
-        print >>self.logstream, "#benchname=%r" %(benchname,)
-        print >>self.logstream, "#benchargs=%r" %(args,)
+        print >>self.logstream, "#benchtype=sizes"
+        print >>self.logstream, "#executable=%s" %(str(self.executable ),)
+        print >>self.logstream, "#benchpath=%s" %(self.benchpath.basename,)
+        print >>self.logstream, "#benchname=%s" %(benchname,)
+        print >>self.logstream, "#benchargs=%s" %(args,)
+        print >>self.logstream
 
     def getnames(self):
         l = []
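
With the repr() calls dropped and the new #benchtype field, a benchmark header
in the log file is now a block of plain key=value lines terminated by an empty
line, following the SEPBENCH separator.  For example (values are made up):

    #benchtype=sizes
    #executable=python2.5
    #benchpath=sizes.py
    #benchname=bench_list_of_None
    #benchargs=()
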
@@ -107,23 +109,40 @@
 # ================ reading a benchmark log file =======================
 #
 
-class LogReader(object):
-    def __init__(self):
-        self.name2results = {}
+class ResultSet(object):
+    def __init__(self, results=None):
+        if results is None:
+            results = []
+        self.results = results
+
+    def getname2results(self):
+        name2results = {}
+        for result in self.results:
+            l = name2results.setdefault(result.benchname, [])
+            l.append(result)
+        return name2results.items()
+
+    def filter(self, benchtype):
+        l = []
+        for result in self.results:
+            if benchtype == None or result.benchtype == benchtype:
+                l.append(result)
+        return ResultSet(l)
 
     def parse(self, logpath):
         f = logpath.open()
         for result in BenchmarkResult.parse(f):
             #print "parsed", result
-            l = self.name2results.setdefault(result.benchname, [])
-            l.append(result)
+            self.results.append(result)
         f.close()
         
 class BenchmarkResult:
-    def __init__(self, snapshots, executable, benchname, benchargs):
+    benchtype = "sizes"
+    def __init__(self, snapshots, executable, benchpath, benchname, benchargs):
         assert snapshots
         self.snapshots = snapshots
         self.executable = executable
+        self.benchpath = benchpath
         self.benchname = benchname
         self.benchargs = benchargs
 
@@ -134,37 +153,55 @@
         return maxvalue
 
     @classmethod
+    def parseheader(cls, iterline):
+        kw = {}
+        while 1:
+            lineno, line = iterline()
+            if not line.strip():
+                return kw 
+            assert line.startswith("#"), line
+            key, value = map(str.strip, line[1:].split("="))
+            kw[key] = value
+
+    @classmethod
     def parse(cls, f):
+        def genline():
+            lineno = 1
+            while 1:
+                yield lineno, f.readline()
+                lineno += 1
+        iterline = genline().next
+        lineno, line = iterline()
         while not f.closed:
-            line = f.readline()
-            if not line.strip():
+            if not line:
                 break
-            if not line.startswith("#executable"):
-                if line != BenchRunner.SEPBENCH:
-                    print "ignoring", line
+            line = line.rstrip()
+            if line != BenchRunner.SEPBENCH:
+                print "ignoring %d: %s" %(lineno, line)
+                lineno, line = iterline()
                 continue
-            # see write_benchheader
-            executable = eval(line.split("=", 1)[1])
-            benchbasename = eval(f.readline().split("=", 1)[1])
-            benchfuncname = eval(f.readline().split("=", 1)[1])
-            benchname = "%s.%s" % (benchbasename[:-3], benchfuncname)
-            benchargs = eval(f.readline().split("=", 1)[1])
-
-            snapshots = []
-            line = f.readline()
-            while 1:
-                mappings = []
-                while line != smaps.SmapsRecorder.SEPSNAPSHOT:
-                    mappings.append(smaps.Mapping(line))
-                    line = f.readline()
-                    #print "reading", line.strip()
-                snapshots.append(Snapshot(mappings))
-                line = f.readline()
-                #print "reading", line.strip()
-                if not line or line.startswith(BenchRunner.SEPBENCH):
-                    break
-            yield BenchmarkResult(snapshots, executable=executable, 
-                benchname=benchname, benchargs=benchargs)
+            kw = cls.parseheader(iterline)
+           
+            benchtype = kw.pop('benchtype')
+            if benchtype == "sizes":
+                snapshots = []
+                lineno, line = iterline()
+                while 1:
+                    mappings = []
+                    while line != smaps.SmapsRecorder.SEPSNAPSHOT:
+                        mappings.append(smaps.Mapping(line))
+                        lineno, line = iterline()
+                        #print "reading", line.strip()
+                    snapshots.append(Snapshot(mappings))
+                    lineno, line = iterline()
+                    print "reading %d: %s" %(lineno, line)
+                    if not line or line.startswith(BenchRunner.SEPBENCH):
+                        break
+                print "yielding result", kw['benchname']
+                yield BenchmarkResult(snapshots, **kw)
+            else:
+                assert 0, benchtype
+        print "end"
 
     def _verify_integrity(self):
         for snap in self.snapshots:
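
The header parsing can be tried in isolation.  A rough sketch of what
parseheader() does with such a block, using a StringIO stand-in for the log
file and the same (lineno, line) iterator that parse() builds (values are
made up):

    from StringIO import StringIO
    from runbench import BenchmarkResult

    f = StringIO("#benchtype=sizes\n"
                 "#executable=python2.5\n"
                 "#benchname=bench_list_of_None\n"
                 "\n")

    def genline():
        lineno = 1
        while 1:
            yield lineno, f.readline()
            lineno += 1

    kw = BenchmarkResult.parseheader(genline().next)
    # kw == {'benchtype': 'sizes', 'executable': 'python2.5',
    #        'benchname': 'bench_list_of_None'}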

Modified: pypy/build/benchmem/testing/test_benchtool.py
==============================================================================
--- pypy/build/benchmem/testing/test_benchtool.py	(original)
+++ pypy/build/benchmem/testing/test_benchtool.py	Fri Oct 10 18:57:14 2008
@@ -30,12 +30,12 @@
     runner = runbench.BenchRunnerSizes("python2.5", "sizes.py", benchlog, options)
     assert runner.benchpath.basename == "sizes.py"
     runner.run()
-    benchresult = runbench.LogReader()
-    benchresult.parse(benchlog)
+    resultset = runbench.ResultSet()
+    resultset.parse(benchlog)
 
     names = runner.getnames()
-    assert len(benchresult.name2results) == len(names)
-    for results in benchresult.name2results.values():
+    assert len(resultset.getname2results()) == len(names)
+    for name, results in resultset.getname2results():
         assert len(results) ==1
         assert len(results[0].snapshots) == 2 + 1
 


