[pypy-svn] r58974 - in pypy/build/benchmem: . testing

xoraxax at codespeak.net
Sat Oct 11 15:43:03 CEST 2008


Author: xoraxax
Date: Sat Oct 11 15:43:02 2008
New Revision: 58974

Modified:
   pypy/build/benchmem/report.py
   pypy/build/benchmem/runbench.py
   pypy/build/benchmem/smaps.py
   pypy/build/benchmem/testing/test_benchtool.py
Log:
(hpk, xoraxax) Add resultset filtering, new basesize reporting, new tests.
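
A minimal usage sketch of the new benchtype filtering (not part of the commit), assuming a benchlog already produced by runbench.py and using only the ResultSet calls visible in this diff (parse, filter, getname2results and the new __nonzero__):

    import py
    import runbench

    resultset = runbench.ResultSet()
    resultset.parse(py.path.local("benchlog"))   # log written by runbench.py

    # filter() narrows the set to one benchtype; the new __nonzero__ lets a
    # reporter bail out early when the log contains no matching results.
    objsizes = resultset.filter(benchtype="objsizes")
    if not objsizes:
        print "no objsize results in this benchlog"
    else:
        for name, results in objsizes.getname2results():
            print name, [result.executable for result in results]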

Modified: pypy/build/benchmem/report.py
==============================================================================
--- pypy/build/benchmem/report.py	(original)
+++ pypy/build/benchmem/report.py	Sat Oct 11 15:43:02 2008
@@ -47,13 +47,13 @@
 
 class TableReporter:
     def __init__(self, resultset):
-        self.resultset = resultset 
+        self.resultset = resultset.filter(benchtype="objsizes")
 
     begin = report = end = lambda x: None  # hooks
 
     def run(self):
         self.begin()
-        for name, results in resultset.getname2results():
+        for name, results in self.resultset.getname2results():
             row0 = ["num"] + [result.executable for result in results]
             numsnapshots = min([len(i.snapshots) for i in results])
 
@@ -109,7 +109,7 @@
 
 class IncrementalSizePerBench:
     def __init__(self, resultset):
-        self.resultset = resultset.filter(benchtype="sizes")
+        self.resultset = resultset.filter(benchtype="objsizes")
 
     def getexecutables(self):
         ret = None
@@ -123,6 +123,8 @@
         return ret 
 
     def run(self):
+        if not self.resultset:
+            return
         tw = py.io.TerminalWriter()
         tw.sep("=", "Incremental private RSS of size benchmarks")
 
@@ -156,18 +158,42 @@
     def __init__(self, resultset):
         self.resultset = resultset.filter(benchtype="basesize")
 
+    def getexecutables(self):
+        return [r.executable for r in self.resultset.results]
+
     def run(self):
-        XXX
         tw = py.io.TerminalWriter()
-        tw.sep("=", "Base Size of interpreters (using sizes.bench_list_of_None)")
-        sorter = Sorter(self.reader.name2results).filter(benchprefix="bench_list_of_None")
-        row0 = "executable rss shared_clean shared_dirty private_clean private_dirty".split()
+        tw.sep("=", "Basesize of interpreters")
+        executables = self.getexecutables()
+        row0 = ['Group-Pagestate'] + executables
         rows = [row0]
-        for result in sorter.name2results['sizes.bench_list_of_None']:
-            row = [result.executable]
-            for name in row0[1:]:
-                row.append(getattr(result.snapshots[0], name))
+        row_kinds = "shared_clean shared_dirty private_clean private_dirty".split()
+        results = self.resultset.results
+        def makerow(rowname, mapping_func):
+            row = [rowname]
+            for result in results:
+                row.append(mapping_func(result))
             rows.append(row)
+        HEAP = runbench.Mappings.HEAP
+        CODE = runbench.Mappings.CODE
+        DATA = runbench.Mappings.DATA
+        makerow("HEAP-RSS", lambda result: 
+                result.snapshot.filter(group=HEAP).rss)
+        makerow("HEAP-private_clean", lambda result:
+                result.snapshot.filter(group=HEAP).private_clean)
+        makerow("HEAP-private_dirty", lambda result:
+                result.snapshot.filter(group=HEAP).private_dirty)
+        makerow("IP-CODE-shared_clean", lambda result:
+                result.snapshot.filter(group=result.executable, kind=CODE).shared_clean)
+        makerow("IP-CODE-private_clean", lambda result:
+                result.snapshot.filter(group=result.executable, kind=CODE).private_clean)
+        makerow("IP-DATA-RSS", lambda result:
+                result.snapshot.filter(group=result.executable, kind=DATA).rss)
+        makerow("IP-DATA-private_clean", lambda result:
+                result.snapshot.filter(group=result.executable, kind=DATA).private_clean)
+        makerow("IP-DATA-private_dirty", lambda result:
+                result.snapshot.filter(group=result.executable, kind=DATA).private_dirty)
+#rest
         tw.line(asciitable(rows))
 
 if __name__ == "__main__":
@@ -180,7 +206,7 @@
     #maxtable_overview(reader)
     CheckpointDetails(resultset).run()
     IncrementalSizePerBench(resultset).run()
-    #BaseSizeOfInterpreters(resultset).run()
+    BaseSizeOfInterpreters(resultset).run()
     
     if options.gnuplot: 
         Gnuplot(resultset).run()
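
To make the shape of the new basesize table concrete, here is a sketch of the row-building pattern used above: one column per executable, one row per group/page-state pair, each cell a single aggregated attribute of a filtered snapshot. It assumes asciitable can be imported from report (it is already used by the other reporters) and that results holds parsed basesize Result objects.

    import runbench
    from report import asciitable   # assumption: asciitable is a module-level name in report.py

    def basesize_rows(results):
        HEAP = runbench.Mappings.HEAP
        CODE = runbench.Mappings.CODE
        rows = [["Group-Pagestate"] + [r.executable for r in results]]
        rows.append(["HEAP-private_dirty"] +
                    [r.snapshot.filter(group=HEAP).private_dirty
                     for r in results])
        rows.append(["IP-CODE-shared_clean"] +
                    [r.snapshot.filter(group=r.executable, kind=CODE).shared_clean
                     for r in results])
        return rows

    # print asciitable(basesize_rows(resultset.filter(benchtype="basesize").results))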

Modified: pypy/build/benchmem/runbench.py
==============================================================================
--- pypy/build/benchmem/runbench.py	(original)
+++ pypy/build/benchmem/runbench.py	Sat Oct 11 15:43:02 2008
@@ -56,9 +56,9 @@
 
 
 class BenchRunnerObjsize(BenchRunner):
-    def __init__(self, executable, fsname, logpath, options):
+    def __init__(self, executable, logpath, options):
         self.executable = executable
-        self.benchpath = benchmarkdir.join(fsname)
+        self.benchpath = benchmarkdir.join("sizes.py")
         assert self.benchpath.check()
         self.logpath = py.path.local(logpath)
         self.logstream = self.logpath.open("a")
@@ -95,7 +95,7 @@
 
     def write_benchheader(self, benchname, args):
         print >>self.logstream, self.SEPBENCH 
-        print >>self.logstream, "#benchtype=sizes"
+        print >>self.logstream, "#benchtype=objsizes"
         print >>self.logstream, "#executable=%s" %(str(self.executable ),)
         print >>self.logstream, "#benchpath=%s" %(self.benchpath.basename,)
         print >>self.logstream, "#benchname=%s" %(benchname,)
@@ -151,6 +151,9 @@
             results = []
         self.results = results
 
+    def __nonzero__(self):
+        return bool(self.results)
+
     def getname2results(self):
         name2results = {}
         for result in self.results:
@@ -223,7 +226,7 @@
 
 
 class ObjsizeResult(Result):
-    benchtype = "sizes"
+    benchtype = "objsizes"
     def __init__(self, snapshots, executable, benchpath, benchname, benchargs):
         assert snapshots
         self.snapshots = snapshots
@@ -254,25 +257,48 @@
         self.executable = executable
 
 
-class Snapshot(object):
+class Mappings(object):
+    HEAP, CODE, DATA = object(), object(), object()
+
     def __init__(self, mappings):
-        assert mappings
         self.mappings = mappings
         for name in smaps.Mapping._attrnames:
             setattr(self, name, sum([getattr(x, name) for x in mappings]))
         self.private = self.private_dirty + self.private_clean
         self.shared = self.shared_dirty + self.shared_clean
 
-    def heap_private(self):
-        private = 0
+    def filter(self, group=None, kind=None, inv=False):
+        new_mappings = []
         for mapping in self.mappings:
-            if mapping.filename == "[heap]" or not mapping.filename:
-                private += mapping.private_clean + mapping.private_dirty 
-        return private 
+            matches = True
+            if group is not None:
+                if group is self.HEAP:
+                    if not (mapping.filename == "[heap]"
+                        or not mapping.filename):
+                        matches = False
+                elif not mapping.filename.endswith(group):
+                    matches = False
+            if kind is not None:
+                if kind is self.CODE and mapping.mode != 'r-xp':
+                    matches = False
+                if kind is self.DATA and mapping.mode != 'rw-p':
+                    matches = False
+
+            if matches != bool(inv):
+                new_mappings.append(mapping)
+        return Mappings(new_mappings)
 
     def memusage(self):
         return "private: %d, shared: %d" %(self.private, self.shared)
 
+class Snapshot(Mappings):
+    def __init__(self, mappings):
+        assert mappings
+        Mappings.__init__(self, mappings)
+
+    def heap_private(self):
+        return self.filter(group=self.HEAP).private
+
 #
 # ==============================================================================
 # Option Handling
@@ -309,19 +335,21 @@
         l.append(executable)
     return l
 
-def getrunnerclass(fsname):
-    if fsname == "sizes.py":
+def getrunnerclass(benchtype):
+    if benchtype == "objsizes":
         return BenchRunnerObjsize
+    if benchtype == "basesize":
+        return BenchRunnerBaseSize
 
 if __name__ == '__main__':
     (options, args) = parser.parse_args()
     
     benchlog = getbenchlog(options)
     if not args:
-        args = ("sizes.py",)
+        args = ("objsizes", "basesize")
     for executable in getexecutables(options):
-        for fsname in args:
-            Runner = getrunnerclass(fsname)
-            runner = Runner(executable, fsname, benchlog, options)
+        for benchtype in args:
+            Runner = getrunnerclass(benchtype)
+            runner = Runner(executable, benchlog, options)
             runner.run()
     print "bench results append to -->>>", benchlog

Modified: pypy/build/benchmem/smaps.py
==============================================================================
--- pypy/build/benchmem/smaps.py	(original)
+++ pypy/build/benchmem/smaps.py	Sat Oct 11 15:43:02 2008
@@ -62,4 +62,8 @@
         for name, value in zip(self._headattrnames, parts):
             setattr(self, name, value)
         self.inode = int(self.inode)
-    
+
+    def __repr__(self):
+        attrdata = ",".join([str(getattr(self, name)) for name in self._attrnames])
+        headcols = " ".join([str(getattr(self, name)) for name in self._headattrnames])
+        return "<Mapping: %s %s>" % (attrdata, headcols)

Modified: pypy/build/benchmem/testing/test_benchtool.py
==============================================================================
--- pypy/build/benchmem/testing/test_benchtool.py	(original)
+++ pypy/build/benchmem/testing/test_benchtool.py	Sat Oct 11 15:43:02 2008
@@ -3,6 +3,7 @@
 
 import runbench
 import smaps
+import report
 
 def setup_module(mod):
     if sys.platform.find("linux") == -1:
@@ -27,7 +28,7 @@
     benchlog=tmpdir.join("log")
     class options:
         numiter = 10
-    runner = runbench.BenchRunnerObjsize("python2.5", "sizes.py", benchlog, options)
+    runner = runbench.BenchRunnerObjsize("python2.5", benchlog, options)
     assert runner.benchpath.basename == "sizes.py"
     runner.run()
     resultset = runbench.ResultSet()
@@ -43,11 +44,11 @@
     script = py.path.local(runbench.__file__).dirpath("runbench.py")
     output = py.process.cmdexec("python %s --numiter=10" %(script))
 
-def test_report_functional():
+def test_report_objsizes_functional():
     tmpdir = py.test.ensuretemp("test_report_functional")
     script = py.path.local(runbench.__file__).dirpath("runbench.py")
     benchlog = tmpdir.join("benchlog")
-    py.process.cmdexec("%s --benchlog=%s --numiter=10" %(script, benchlog))
+    py.process.cmdexec("%s --benchlog=%s --numiter=10 objsizes" %(script, benchlog))
     report = script.dirpath("report.py")
     old = tmpdir.chdir()
     try:
@@ -158,6 +159,31 @@
         val2 = getattr(snap.mappings[1], name)
         assert sumval == val1 + val2
 
+def test_mapping_grouping():
+    lines = [
+        "988,796,0,0,796,0 08048000-0813f000 r-xp 00000000 fd:00 75457      /tmp/IP",
+        "420,96,0,0,4,92 08048000-0813f000 rw-p 00000000 fd:00 75457      /tmp/IP",
+        "420,0,0,0,0,13 08048000-0813f000 rw-p 00000000 fd:00 75457      /usr/foo.lib",
+        "504,416,0,0,0,416        08165000-081e3000 rw-p 08165000 00:00 0          [heap]",
+        "524,400,0,0,0,400        b7d0e000-b7d91000 rw-p b7d0e000 00:00 0",
+        ]
+    snap = runbench.Snapshot([smaps.Mapping(line) for line in lines])
+    m_heap = snap.filter(group=snap.HEAP)
+    assert m_heap.private == 416 + 400
+    assert len(m_heap.mappings) == 2
+
+    m_noheap = snap.filter(group=snap.HEAP, inv=True)
+    assert len(m_noheap.mappings) == 3
+
+    m_noip_noheap = m_noheap.filter(group="IP", inv=True)
+    assert len(m_noip_noheap.mappings) == 1
+
+    m_ip_code = snap.filter(group="IP", kind=snap.CODE)
+    assert m_ip_code.private_clean == 796
+    assert m_ip_code.private_dirty == 0
+    m_ip_data = snap.filter(group="IP", kind=snap.DATA)
+    assert m_ip_data.private_clean == 4
+    assert m_ip_data.private_dirty == 92
 
 def test_basesize():
     tmpdir = py.test.ensuretemp("basesize")
@@ -173,3 +199,36 @@
     assert result.benchtype == 'basesize'
     assert result.executable.endswith('python2.5')
     assert result.snapshot.heap_private() > 42
+
+def test_basesize_reporting():
+    tmpdir = py.test.ensuretemp("test_basesize_reporting")
+    script = py.path.local(runbench.__file__).dirpath("runbench.py")
+    benchlog = tmpdir.join("benchlog")
+    print "Runner"
+    py.process.cmdexec("%s --benchlog=%s basesize" % (script, benchlog))
+    report = script.dirpath("report.py")
+    old = tmpdir.chdir()
+    try:
+        print "Reporter"
+        out = py.process.cmdexec("%s --benchlog %s" %(report, benchlog,))
+        assert out.lower().find("basesize") != -1
+    finally:
+        old.chdir()
+
+def test_basesize_multiple_binaries_half_functional():
+    tmpdir = py.test.ensuretemp("test_basesize_hf")
+    script = py.path.local(runbench.__file__).dirpath("runbench.py")
+    benchlog = tmpdir.join("benchlog")
+    py.process.cmdexec("%s -e python2.5,python2.5 --benchlog=%s basesize" % (script, benchlog))
+
+    resultset = runbench.ResultSet()
+    resultset.parse(benchlog)
+
+    assert len(resultset.results) == 2
+    result1, result2 = resultset.results
+    assert result1.benchtype == 'basesize'
+    assert result1.executable.endswith('python2.5')
+    assert result1.snapshot.heap_private() > 42
+    assert result2.snapshot.heap_private() - result1.snapshot.heap_private() < 21
+    report.CheckpointDetails(resultset).run() # shouldnt do anything
+
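
End to end, the new benchtype arguments can be driven the same way the functional tests above do it; a hypothetical run from the benchmem directory (log path and executable name are placeholders):

    import py
    # run both new benchtypes for one executable, appending to ./benchlog
    py.process.cmdexec("python runbench.py -e python2.5 --benchlog=benchlog "
                       "--numiter=10 objsizes basesize")
    # render the objsize and basesize reports from that log
    print py.process.cmdexec("python report.py --benchlog benchlog")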


