[pypy-svn] r61187 - pypy/trunk/lib-python/2.5.2

arigo at codespeak.net
Wed Jan 21 14:49:19 CET 2009


Author: arigo
Date: Wed Jan 21 14:49:18 2009
New Revision: 61187

Added:
   pypy/trunk/lib-python/2.5.2/profile.py   (contents, props changed)
   pypy/trunk/lib-python/2.5.2/pstats.py   (contents, props changed)
Log:
Add the two modules missing from Python 2.5.2.
The original import was presumably done from
a Debian installation, where these two modules
are removed.


Added: pypy/trunk/lib-python/2.5.2/profile.py
==============================================================================
--- (empty file)
+++ pypy/trunk/lib-python/2.5.2/profile.py	Wed Jan 21 14:49:18 2009
@@ -0,0 +1,619 @@
+#! /usr/bin/env python
+#
+# Class for profiling python code. rev 1.0  6/2/94
+#
+# Based on prior profile module by Sjoerd Mullender...
+#   which was hacked somewhat by: Guido van Rossum
+
+"""Class for profiling Python code."""
+
+# Copyright 1994, by InfoSeek Corporation, all rights reserved.
+# Written by James Roskind
+#
+# Permission to use, copy, modify, and distribute this Python software
+# and its associated documentation for any purpose (subject to the
+# restriction in the following sentence) without fee is hereby granted,
+# provided that the above copyright notice appears in all copies, and
+# that both that copyright notice and this permission notice appear in
+# supporting documentation, and that the name of InfoSeek not be used in
+# advertising or publicity pertaining to distribution of the software
+# without specific, written prior permission.  This permission is
+# explicitly restricted to the copying and modification of the software
+# to remain in Python, compiled Python, or other languages (such as C)
+# wherein the modified or derived code is exclusively imported into a
+# Python module.
+#
+# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
+# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
+# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
+# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+
+import sys
+import os
+import time
+import marshal
+from optparse import OptionParser
+
+__all__ = ["run", "runctx", "help", "Profile"]
+
+# Sample integer timer for use with the profiler (commented out):
+#i_count = 0
+#def integer_timer():
+#       global i_count
+#       i_count = i_count + 1
+#       return i_count
+#itimes = integer_timer # replace with C coded timer returning integers
+
+#**************************************************************************
+# The following are the static member functions for the profiler class
+# Note that an instance of Profile() is *not* needed to call them.
+#**************************************************************************
+
+def run(statement, filename=None, sort=-1):
+    """Run statement under profiler optionally saving results in filename
+
+    This function takes a single argument that can be passed to the
+    "exec" statement, and an optional file name.  In all cases this
+    routine attempts to "exec" its first argument and gather profiling
+    statistics from the execution. If no file name is present, then this
+    function automatically prints a simple profiling report, sorted by the
+    standard name string (file/line/function-name) that is presented in
+    each line.
+    """
+    prof = Profile()
+    try:
+        prof = prof.run(statement)
+    except SystemExit:
+        pass
+    if filename is not None:
+        prof.dump_stats(filename)
+    else:
+        return prof.print_stats(sort)
+
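+# A minimal usage sketch (illustrative only, not part of the original module);
+# the statement string and the output file name below are placeholders.
+#
+#   import profile
+#   profile.run('my_function()', 'my_function.prof')  # save stats to a file
+#   profile.run('my_function()')                      # or print a report now
+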
+def runctx(statement, globals, locals, filename=None):
+    """Run statement under profiler, supplying your own globals and locals,
+    optionally saving results in filename.
+
+    statement and filename have the same semantics as profile.run
+    """
+    prof = Profile()
+    try:
+        prof = prof.runctx(statement, globals, locals)
+    except SystemExit:
+        pass
+
+    if filename is not None:
+        prof.dump_stats(filename)
+    else:
+        return prof.print_stats()
+
+# Backwards compatibility.
+def help():
+    print "Documentation for the profile module can be found "
+    print "in the Python Library Reference, section 'The Python Profiler'."
+
+if os.name == "mac":
+    import MacOS
+    def _get_time_mac(timer=MacOS.GetTicks):
+        return timer() / 60.0
+
+if hasattr(os, "times"):
+    def _get_time_times(timer=os.times):
+        t = timer()
+        return t[0] + t[1]
+
+# Using getrusage(3) is better than clock(3) if available:
+# on some systems (e.g. FreeBSD), getrusage has a higher resolution.
+# Furthermore, on a POSIX system, clock(3) returns microseconds, which
+# wrap around after 36min.
+_has_res = 0
+try:
+    import resource
+    resgetrusage = lambda: resource.getrusage(resource.RUSAGE_SELF)
+    def _get_time_resource(timer=resgetrusage):
+        t = timer()
+        return t[0] + t[1]
+    _has_res = 1
+except ImportError:
+    pass
+
+class Profile:
+    """Profiler class.
+
+    self.cur is always a tuple.  Each such tuple corresponds to a stack
+    frame that is currently active (self.cur[-2]).  The following are the
+    definitions of its members.  We use this external "parallel stack" to
+    avoid contaminating the program that we are profiling. (old profiler
+    used to write into the frame's local dictionary!!) Derived classes
+    can change the definition of some entries, as long as they leave
+    [-2:] intact (frame and previous tuple).  In case an internal error is
+    detected, the -3 element is used as the function name.
+
+    [ 0] = Time that needs to be charged to the parent frame's function.
+           It is used so that a function call will not have to access the
+           timing data for the parent frame.
+    [ 1] = Total time spent in this frame's function, excluding time in
+           subfunctions (this latter is tallied in cur[2]).
+    [ 2] = Total time spent in subfunctions, excluding time executing the
+           frame's function (this latter is tallied in cur[1]).
+    [-3] = Name of the function that corresponds to this frame.
+    [-2] = Actual frame that we correspond to (used to sync exception handling).
+    [-1] = Our parent 6-tuple (corresponds to frame.f_back).
+
+    Timing data for each function is stored as a 5-tuple in the dictionary
+    self.timings[].  The index is always the name stored in self.cur[-3].
+    The following are the definitions of the members:
+
+    [0] = The number of times this function was called, not counting direct
+          or indirect recursion,
+    [1] = Number of times this function appears on the stack, minus one
+    [2] = Total time spent internal to this function
+    [3] = Cumulative time that this function was present on the stack.  In
+          non-recursive functions, this is the total execution time from start
+          to finish of each invocation of a function, including time spent in
+          all subfunctions.
+    [4] = A dictionary indicating for each function name, the number of times
+          it was called by us.
+    """
+
+    bias = 0  # calibration constant
+
+    def __init__(self, timer=None, bias=None):
+        self.timings = {}
+        self.cur = None
+        self.cmd = ""
+        self.c_func_name = ""
+
+        if bias is None:
+            bias = self.bias
+        self.bias = bias     # Materialize in local dict for lookup speed.
+
+        if not timer:
+            if _has_res:
+                self.timer = resgetrusage
+                self.dispatcher = self.trace_dispatch
+                self.get_time = _get_time_resource
+            elif os.name == 'mac':
+                self.timer = MacOS.GetTicks
+                self.dispatcher = self.trace_dispatch_mac
+                self.get_time = _get_time_mac
+            elif hasattr(time, 'clock'):
+                self.timer = self.get_time = time.clock
+                self.dispatcher = self.trace_dispatch_i
+            elif hasattr(os, 'times'):
+                self.timer = os.times
+                self.dispatcher = self.trace_dispatch
+                self.get_time = _get_time_times
+            else:
+                self.timer = self.get_time = time.time
+                self.dispatcher = self.trace_dispatch_i
+        else:
+            self.timer = timer
+            t = self.timer() # test out timer function
+            try:
+                length = len(t)
+            except TypeError:
+                self.get_time = timer
+                self.dispatcher = self.trace_dispatch_i
+            else:
+                if length == 2:
+                    self.dispatcher = self.trace_dispatch
+                else:
+                    self.dispatcher = self.trace_dispatch_l
+                # This get_time() implementation needs to be defined
+                # here to capture the passed-in timer in the parameter
+                # list (for performance).  Note that we can't assume
+                # the timer() result contains two values in all
+                # cases.
+                def get_time_timer(timer=timer, sum=sum):
+                    return sum(timer())
+                self.get_time = get_time_timer
+        self.t = self.get_time()
+        self.simulate_call('profiler')
+
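+    # Illustrative sketch (not part of the original source): the constructor
+    # picks a dispatcher from the shape of the timer's return value, so a
+    # custom timer can simply be passed in.  'my_func' below is a placeholder.
+    #
+    #   p = Profile(timer=time.time)   # scalar timer -> trace_dispatch_i
+    #   p = Profile(timer=os.times)    # multi-value timer -> trace_dispatch_l
+    #   p.runcall(my_func)
+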
+    # Heavily optimized dispatch routine for os.times() timer
+
+    def trace_dispatch(self, frame, event, arg):
+        timer = self.timer
+        t = timer()
+        t = t[0] + t[1] - self.t - self.bias
+
+        if event == "c_call":
+            self.c_func_name = arg.__name__
+
+        if self.dispatch[event](self, frame,t):
+            t = timer()
+            self.t = t[0] + t[1]
+        else:
+            r = timer()
+            self.t = r[0] + r[1] - t # put back unrecorded delta
+
+    # Dispatch routine for best timer program (return = scalar, fastest if
+    # an integer but float works too -- and time.clock() relies on that).
+
+    def trace_dispatch_i(self, frame, event, arg):
+        timer = self.timer
+        t = timer() - self.t - self.bias
+
+        if event == "c_call":
+            self.c_func_name = arg.__name__
+
+        if self.dispatch[event](self, frame, t):
+            self.t = timer()
+        else:
+            self.t = timer() - t  # put back unrecorded delta
+
+    # Dispatch routine for macintosh (timer returns time in ticks of
+    # 1/60th second)
+
+    def trace_dispatch_mac(self, frame, event, arg):
+        timer = self.timer
+        t = timer()/60.0 - self.t - self.bias
+
+        if event == "c_call":
+            self.c_func_name = arg.__name__
+
+        if self.dispatch[event](self, frame, t):
+            self.t = timer()/60.0
+        else:
+            self.t = timer()/60.0 - t  # put back unrecorded delta
+
+    # SLOW generic dispatch routine for timer returning lists of numbers
+
+    def trace_dispatch_l(self, frame, event, arg):
+        get_time = self.get_time
+        t = get_time() - self.t - self.bias
+
+        if event == "c_call":
+            self.c_func_name = arg.__name__
+
+        if self.dispatch[event](self, frame, t):
+            self.t = get_time()
+        else:
+            self.t = get_time() - t # put back unrecorded delta
+
+    # In the event handlers, the first 3 elements of self.cur are unpacked
+    # into vrbls w/ 3-letter names.  The last two characters are meant to be
+    # mnemonic:
+    #     _pt  self.cur[0] "parent time"   time to be charged to parent frame
+    #     _it  self.cur[1] "internal time" time spent directly in the function
+    #     _et  self.cur[2] "external time" time spent in subfunctions
+
+    def trace_dispatch_exception(self, frame, t):
+        rpt, rit, ret, rfn, rframe, rcur = self.cur
+        if (rframe is not frame) and rcur:
+            return self.trace_dispatch_return(rframe, t)
+        self.cur = rpt, rit+t, ret, rfn, rframe, rcur
+        return 1
+
+
+    def trace_dispatch_call(self, frame, t):
+        if self.cur and frame.f_back is not self.cur[-2]:
+            rpt, rit, ret, rfn, rframe, rcur = self.cur
+            if not isinstance(rframe, Profile.fake_frame):
+                assert rframe.f_back is frame.f_back, ("Bad call", rfn,
+                                                       rframe, rframe.f_back,
+                                                       frame, frame.f_back)
+                self.trace_dispatch_return(rframe, 0)
+                assert (self.cur is None or \
+                        frame.f_back is self.cur[-2]), ("Bad call",
+                                                        self.cur[-3])
+        fcode = frame.f_code
+        fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
+        self.cur = (t, 0, 0, fn, frame, self.cur)
+        timings = self.timings
+        if fn in timings:
+            cc, ns, tt, ct, callers = timings[fn]
+            timings[fn] = cc, ns + 1, tt, ct, callers
+        else:
+            timings[fn] = 0, 0, 0, 0, {}
+        return 1
+
+    def trace_dispatch_c_call (self, frame, t):
+        fn = ("", 0, self.c_func_name)
+        self.cur = (t, 0, 0, fn, frame, self.cur)
+        timings = self.timings
+        if timings.has_key(fn):
+            cc, ns, tt, ct, callers = timings[fn]
+            timings[fn] = cc, ns+1, tt, ct, callers
+        else:
+            timings[fn] = 0, 0, 0, 0, {}
+        return 1
+
+    def trace_dispatch_return(self, frame, t):
+        if frame is not self.cur[-2]:
+            assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3])
+            self.trace_dispatch_return(self.cur[-2], 0)
+
+        # Prefix "r" means part of the Returning or exiting frame.
+        # Prefix "p" means part of the Previous or Parent or older frame.
+
+        rpt, rit, ret, rfn, frame, rcur = self.cur
+        rit = rit + t
+        frame_total = rit + ret
+
+        ppt, pit, pet, pfn, pframe, pcur = rcur
+        self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
+
+        timings = self.timings
+        cc, ns, tt, ct, callers = timings[rfn]
+        if not ns:
+            # This is the only occurrence of the function on the stack.
+            # Else this is a (directly or indirectly) recursive call, and
+            # its cumulative time will get updated when the topmost call to
+            # it returns.
+            ct = ct + frame_total
+            cc = cc + 1
+
+        if pfn in callers:
+            callers[pfn] = callers[pfn] + 1  # hack: gather more
+            # stats such as the amount of time added to ct courtesy
+            # of this specific call, and the contribution to cc
+            # courtesy of this call.
+        else:
+            callers[pfn] = 1
+
+        timings[rfn] = cc, ns - 1, tt + rit, ct, callers
+
+        return 1
+
+
+    dispatch = {
+        "call": trace_dispatch_call,
+        "exception": trace_dispatch_exception,
+        "return": trace_dispatch_return,
+        "c_call": trace_dispatch_c_call,
+        "c_exception": trace_dispatch_return,  # the C function returned
+        "c_return": trace_dispatch_return,
+        }
+
+
+    # The next few functions play with self.cmd. By carefully preloading
+    # our parallel stack, we can force the profiled result to include
+    # an arbitrary string as the name of the calling function.
+    # We use self.cmd as that string, and the resulting stats look
+    # very nice :-).
+
+    def set_cmd(self, cmd):
+        if self.cur[-1]: return   # already set
+        self.cmd = cmd
+        self.simulate_call(cmd)
+
+    class fake_code:
+        def __init__(self, filename, line, name):
+            self.co_filename = filename
+            self.co_line = line
+            self.co_name = name
+            self.co_firstlineno = 0
+
+        def __repr__(self):
+            return repr((self.co_filename, self.co_line, self.co_name))
+
+    class fake_frame:
+        def __init__(self, code, prior):
+            self.f_code = code
+            self.f_back = prior
+
+    def simulate_call(self, name):
+        code = self.fake_code('profile', 0, name)
+        if self.cur:
+            pframe = self.cur[-2]
+        else:
+            pframe = None
+        frame = self.fake_frame(code, pframe)
+        self.dispatch['call'](self, frame, 0)
+
+    # collect stats from pending stack, including getting final
+    # timings for self.cmd frame.
+
+    def simulate_cmd_complete(self):
+        get_time = self.get_time
+        t = get_time() - self.t
+        while self.cur[-1]:
+            # We *can* cause assertion errors here if
+            # trace_dispatch_return checks for a frame match!
+            self.dispatch['return'](self, self.cur[-2], t)
+            t = 0
+        self.t = get_time() - t
+
+
+    def print_stats(self, sort=-1):
+        import pstats
+        pstats.Stats(self).strip_dirs().sort_stats(sort). \
+                  print_stats()
+
+    def dump_stats(self, file):
+        f = open(file, 'wb')
+        self.create_stats()
+        marshal.dump(self.stats, f)
+        f.close()
+
+    def create_stats(self):
+        self.simulate_cmd_complete()
+        self.snapshot_stats()
+
+    def snapshot_stats(self):
+        self.stats = {}
+        for func, (cc, ns, tt, ct, callers) in self.timings.iteritems():
+            callers = callers.copy()
+            nc = 0
+            for callcnt in callers.itervalues():
+                nc += callcnt
+            self.stats[func] = cc, nc, tt, ct, callers
+
+
+    # The following two methods can be called by clients to use
+    # a profiler to profile a statement, given as a string.
+
+    def run(self, cmd):
+        import __main__
+        dict = __main__.__dict__
+        return self.runctx(cmd, dict, dict)
+
+    def runctx(self, cmd, globals, locals):
+        self.set_cmd(cmd)
+        sys.setprofile(self.dispatcher)
+        try:
+            exec cmd in globals, locals
+        finally:
+            sys.setprofile(None)
+        return self
+
+    # This method is more useful to profile a single function call.
+    def runcall(self, func, *args, **kw):
+        self.set_cmd(repr(func))
+        sys.setprofile(self.dispatcher)
+        try:
+            return func(*args, **kw)
+        finally:
+            sys.setprofile(None)
+
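+    # Illustrative sketch (not in the original source): runcall() profiles a
+    # single call and returns its result.  'compute' and its arguments are
+    # placeholders.
+    #
+    #   p = Profile()
+    #   result = p.runcall(compute, 10, verbose=True)
+    #   p.print_stats()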
+
+    #******************************************************************
+    # The following calculates the overhead for using a profiler.  The
+    # problem is that it takes a fair amount of time for the profiler
+    # to stop the stopwatch (from the time it receives an event).
+    # Similarly, there is a delay from the time that the profiler
+    # re-starts the stopwatch before the user's code really gets to
+    # continue.  The following code tries to measure the difference on
+    # a per-event basis.
+    #
+    # Note that this difference is only significant if there are a lot of
+    # events, and relatively little user code per event.  For example,
+    # code with small functions will typically benefit from having the
+    # profiler calibrated for the current platform.  This *could* be
+    # done on the fly during init() time, but it is not worth the
+    # effort.  Also note that if too large a value is specified, then
+    # execution time on some functions will actually appear as a
+    # negative number.  It is *normal* for some functions (with very
+    # low call counts) to have such negative stats, even if the
+    # calibration figure is "correct."
+    #
+    # One alternative to profile-time calibration adjustments (i.e.,
+    # adding in the magic little delta during each event) is to track
+    # more carefully the number of events (and cumulatively, the number
+    # of events during sub functions) that are seen.  If this were
+    # done, then the arithmetic could be done after the fact (i.e., at
+    # display time).  Currently, we track only call/return events.
+    # These values can be deduced by examining the callees and callers
+    # vectors for each function.  Hence we *can* almost correct the
+    # internal time figure at print time (note that we currently don't
+    # track exception event processing counts).  Unfortunately, there
+    # is currently no similar information for cumulative sub-function
+    # time.  It would not be hard to "get all this info" at profiler
+    # time.  Specifically, we would have to extend the tuples to keep
+    # counts of this in each frame, and then extend the defs of timing
+    # tuples to include the significant two figures. I'm a bit fearful
+    # that this additional feature will slow the heavily optimized
+    # event/time ratio (i.e., the profiler would run slower, for a very
+    # low "value added" feature.)
+    #**************************************************************
+
+    def calibrate(self, m, verbose=0):
+        if self.__class__ is not Profile:
+            raise TypeError("Subclasses must override .calibrate().")
+
+        saved_bias = self.bias
+        self.bias = 0
+        try:
+            return self._calibrate_inner(m, verbose)
+        finally:
+            self.bias = saved_bias
+
+    def _calibrate_inner(self, m, verbose):
+        get_time = self.get_time
+
+        # Set up a test case to be run with and without profiling.  Include
+        # lots of calls, because we're trying to quantify stopwatch overhead.
+        # Do not raise any exceptions, though, because we want to know
+        # exactly how many profile events are generated (one call event, +
+        # one return event, per Python-level call).
+
+        def f1(n):
+            for i in range(n):
+                x = 1
+
+        def f(m, f1=f1):
+            for i in range(m):
+                f1(100)
+
+        f(m)    # warm up the cache
+
+        # elapsed_noprofile <- time f(m) takes without profiling.
+        t0 = get_time()
+        f(m)
+        t1 = get_time()
+        elapsed_noprofile = t1 - t0
+        if verbose:
+            print "elapsed time without profiling =", elapsed_noprofile
+
+        # elapsed_profile <- time f(m) takes with profiling.  The difference
+        # is profiling overhead, only some of which the profiler subtracts
+        # out on its own.
+        p = Profile()
+        t0 = get_time()
+        p.runctx('f(m)', globals(), locals())
+        t1 = get_time()
+        elapsed_profile = t1 - t0
+        if verbose:
+            print "elapsed time with profiling =", elapsed_profile
+
+        # reported_time <- "CPU seconds" the profiler charged to f and f1.
+        total_calls = 0.0
+        reported_time = 0.0
+        for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
+                p.timings.items():
+            if funcname in ("f", "f1"):
+                total_calls += cc
+                reported_time += tt
+
+        if verbose:
+            print "'CPU seconds' profiler reported =", reported_time
+            print "total # calls =", total_calls
+        if total_calls != m + 1:
+            raise ValueError("internal error: total calls = %d" % total_calls)
+
+        # reported_time - elapsed_noprofile = overhead the profiler wasn't
+        # able to measure.  Divide by twice the number of calls (since there
+        # are two profiler events per call in this test) to get the hidden
+        # overhead per event.
+        mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
+        if verbose:
+            print "mean stopwatch overhead per profile event =", mean
+        return mean
+
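+    # Illustrative sketch (not part of the original source): a common way to
+    # apply the result of calibrate() is to install it as the class-level
+    # bias, so that profilers created afterwards subtract it on every event.
+    # The repetition count below is arbitrary.
+    #
+    #   pr = Profile()
+    #   Profile.bias = pr.calibrate(10000)
+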
+#****************************************************************************
+def Stats(*args):
+    print 'Report generating functions are in the "pstats" module\a'
+
+def main():
+    usage = "profile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
+    parser = OptionParser(usage=usage)
+    parser.allow_interspersed_args = False
+    parser.add_option('-o', '--outfile', dest="outfile",
+        help="Save stats to <outfile>", default=None)
+    parser.add_option('-s', '--sort', dest="sort",
+        help="Sort order when printing to stdout, based on pstats.Stats class", default=-1)
+
+    if not sys.argv[1:]:
+        parser.print_usage()
+        sys.exit(2)
+
+    (options, args) = parser.parse_args()
+    sys.argv[:] = args
+
+    if (len(sys.argv) > 0):
+        sys.path.insert(0, os.path.dirname(sys.argv[0]))
+        run('execfile(%r)' % (sys.argv[0],), options.outfile, options.sort)
+    else:
+        parser.print_usage()
+    return parser
+
+# When invoked as main program, invoke the profiler on a script
+if __name__ == '__main__':
+    main()

Added: pypy/trunk/lib-python/2.5.2/pstats.py
==============================================================================
--- (empty file)
+++ pypy/trunk/lib-python/2.5.2/pstats.py	Wed Jan 21 14:49:18 2009
@@ -0,0 +1,684 @@
+"""Class for printing reports on profiled python code."""
+
+# Class for printing reports on profiled python code. rev 1.0  4/1/94
+#
+# Based on prior profile module by Sjoerd Mullender...
+#   which was hacked somewhat by: Guido van Rossum
+#
+# see profile.doc and profile.py for more info.
+
+# Copyright 1994, by InfoSeek Corporation, all rights reserved.
+# Written by James Roskind
+#
+# Permission to use, copy, modify, and distribute this Python software
+# and its associated documentation for any purpose (subject to the
+# restriction in the following sentence) without fee is hereby granted,
+# provided that the above copyright notice appears in all copies, and
+# that both that copyright notice and this permission notice appear in
+# supporting documentation, and that the name of InfoSeek not be used in
+# advertising or publicity pertaining to distribution of the software
+# without specific, written prior permission.  This permission is
+# explicitly restricted to the copying and modification of the software
+# to remain in Python, compiled Python, or other languages (such as C)
+# wherein the modified or derived code is exclusively imported into a
+# Python module.
+#
+# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
+# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
+# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
+# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+
+import sys
+import os
+import time
+import marshal
+import re
+
+__all__ = ["Stats"]
+
+class Stats:
+    """This class is used for creating reports from data generated by the
+    Profile class.  It is a "friend" of that class, and imports data either
+    by direct access to members of Profile class, or by reading in a dictionary
+    that was emitted (via marshal) from the Profile class.
+
+    The big change from the previous Profiler (in terms of raw functionality)
+    is that an "add()" method has been provided to combine Stats from
+    several distinct profile runs.  Both the constructor and the add()
+    method now take arbitrarily many file names as arguments.
+
+    All the print methods now take an argument that indicates how many lines
+    to print.  If the arg is a floating point number between 0 and 1.0, then
+    it is taken as a decimal percentage of the available lines to be printed
+    (e.g., .1 means print 10% of all available lines).  If it is an integer,
+    it is taken to mean the number of lines of data that you wish to have
+    printed.
+
+    The sort_stats() method now processes some additional options (i.e., in
+    addition to the old -1, 0, 1, or 2).  It takes an arbitrary number of
+    quoted strings to select the sort order.  For example sort_stats('time',
+    'name') sorts on the major key of 'internal function time', and on the
+    minor key of 'the name of the function'.  Look at the two tables in
+    sort_stats() and get_sort_arg_defs(self) for more examples.
+
+    All methods return self,  so you can string together commands like:
+        Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
+                            print_stats(5).print_callers(5)
+    """
+
+    def __init__(self, *args, **kwds):
+        # I can't figure out how to explicitly specify a stream keyword arg
+        # with *args:
+        #   def __init__(self, *args, stream=sys.stdout): ...
+        # so I use **kwds and squawk if something unexpected is passed in.
+        self.stream = sys.stdout
+        if "stream" in kwds:
+            self.stream = kwds["stream"]
+            del kwds["stream"]
+        if kwds:
+            keys = kwds.keys()
+            keys.sort()
+            extras = ", ".join(["%s=%s" % (k, kwds[k]) for k in keys])
+            raise ValueError, "unrecognized keyword args: %s" % extras
+        if not len(args):
+            arg = None
+        else:
+            arg = args[0]
+            args = args[1:]
+        self.init(arg)
+        self.add(*args)
+
+    def init(self, arg):
+        self.all_callees = None  # calc only if needed
+        self.files = []
+        self.fcn_list = None
+        self.total_tt = 0
+        self.total_calls = 0
+        self.prim_calls = 0
+        self.max_name_len = 0
+        self.top_level = {}
+        self.stats = {}
+        self.sort_arg_dict = {}
+        self.load_stats(arg)
+        trouble = 1
+        try:
+            self.get_top_level_stats()
+            trouble = 0
+        finally:
+            if trouble:
+                print >> self.stream, "Invalid timing data",
+                if self.files: print >> self.stream, self.files[-1],
+                print >> self.stream
+
+    def load_stats(self, arg):
+        if not arg:  self.stats = {}
+        elif isinstance(arg, basestring):
+            f = open(arg, 'rb')
+            self.stats = marshal.load(f)
+            f.close()
+            try:
+                file_stats = os.stat(arg)
+                arg = time.ctime(file_stats.st_mtime) + "    " + arg
+            except:  # in case this is not unix
+                pass
+            self.files = [ arg ]
+        elif hasattr(arg, 'create_stats'):
+            arg.create_stats()
+            self.stats = arg.stats
+            arg.stats = {}
+        if not self.stats:
+            raise TypeError, "Cannot create or construct a %r object from %r" % (
+                             self.__class__, arg)
+        return
+
+    def get_top_level_stats(self):
+        for func, (cc, nc, tt, ct, callers) in self.stats.items():
+            self.total_calls += nc
+            self.prim_calls  += cc
+            self.total_tt    += tt
+            if callers.has_key(("jprofile", 0, "profiler")):
+                self.top_level[func] = None
+            if len(func_std_string(func)) > self.max_name_len:
+                self.max_name_len = len(func_std_string(func))
+
+    def add(self, *arg_list):
+        if not arg_list: return self
+        if len(arg_list) > 1: self.add(*arg_list[1:])
+        other = arg_list[0]
+        if type(self) != type(other) or self.__class__ != other.__class__:
+            other = Stats(other)
+        self.files += other.files
+        self.total_calls += other.total_calls
+        self.prim_calls += other.prim_calls
+        self.total_tt += other.total_tt
+        for func in other.top_level:
+            self.top_level[func] = None
+
+        if self.max_name_len < other.max_name_len:
+            self.max_name_len = other.max_name_len
+
+        self.fcn_list = None
+
+        for func, stat in other.stats.iteritems():
+            if func in self.stats:
+                old_func_stat = self.stats[func]
+            else:
+                old_func_stat = (0, 0, 0, 0, {},)
+            self.stats[func] = add_func_stats(old_func_stat, stat)
+        return self
+
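+    # Illustrative sketch (not in the original source): combining the data
+    # from several dump files into one report.  The file names are placeholders.
+    #
+    #   s = Stats('run1.prof', 'run2.prof')  # the constructor accepts many files
+    #   s.add('run3.prof')                   # more data can be merged later
+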
+    def dump_stats(self, filename):
+        """Write the profile data to a file we know how to load back."""
+        f = file(filename, 'wb')
+        try:
+            marshal.dump(self.stats, f)
+        finally:
+            f.close()
+
+    # list the tuple indices and directions for sorting,
+    # along with some printable description
+    sort_arg_dict_default = {
+              "calls"     : (((1,-1),              ), "call count"),
+              "cumulative": (((3,-1),              ), "cumulative time"),
+              "file"      : (((4, 1),              ), "file name"),
+              "line"      : (((5, 1),              ), "line number"),
+              "module"    : (((4, 1),              ), "file name"),
+              "name"      : (((6, 1),              ), "function name"),
+              "nfl"       : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
+              "pcalls"    : (((0,-1),              ), "call count"),
+              "stdname"   : (((7, 1),              ), "standard name"),
+              "time"      : (((2,-1),              ), "internal time"),
+              }
+
+    def get_sort_arg_defs(self):
+        """Expand all abbreviations that are unique."""
+        if not self.sort_arg_dict:
+            self.sort_arg_dict = dict = {}
+            bad_list = {}
+            for word, tup in self.sort_arg_dict_default.iteritems():
+                fragment = word
+                while fragment:
+                    if not fragment:
+                        break
+                    if fragment in dict:
+                        bad_list[fragment] = 0
+                        break
+                    dict[fragment] = tup
+                    fragment = fragment[:-1]
+            for word in bad_list:
+                del dict[word]
+        return self.sort_arg_dict
+
+    def sort_stats(self, *field):
+        if not field:
+            self.fcn_list = 0
+            return self
+        if len(field) == 1 and type(field[0]) == type(1):
+            # Be compatible with old profiler
+            field = [ {-1: "stdname",
+                      0:"calls",
+                      1:"time",
+                      2: "cumulative" }  [ field[0] ] ]
+
+        sort_arg_defs = self.get_sort_arg_defs()
+        sort_tuple = ()
+        self.sort_type = ""
+        connector = ""
+        for word in field:
+            sort_tuple = sort_tuple + sort_arg_defs[word][0]
+            self.sort_type += connector + sort_arg_defs[word][1]
+            connector = ", "
+
+        stats_list = []
+        for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
+            stats_list.append((cc, nc, tt, ct) + func +
+                              (func_std_string(func), func))
+
+        stats_list.sort(TupleComp(sort_tuple).compare)
+
+        self.fcn_list = fcn_list = []
+        for tuple in stats_list:
+            fcn_list.append(tuple[-1])
+        return self
+
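+    # Illustrative sketch (not in the original source): sorting by one or more
+    # keys; unique prefixes are expanded by get_sort_arg_defs(), so 'cum' works
+    # as an abbreviation of 'cumulative'.
+    #
+    #   s.sort_stats('cumulative', 'name')
+    #   s.sort_stats('cum')
+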
+    def reverse_order(self):
+        if self.fcn_list:
+            self.fcn_list.reverse()
+        return self
+
+    def strip_dirs(self):
+        oldstats = self.stats
+        self.stats = newstats = {}
+        max_name_len = 0
+        for func, (cc, nc, tt, ct, callers) in oldstats.iteritems():
+            newfunc = func_strip_path(func)
+            if len(func_std_string(newfunc)) > max_name_len:
+                max_name_len = len(func_std_string(newfunc))
+            newcallers = {}
+            for func2, caller in callers.iteritems():
+                newcallers[func_strip_path(func2)] = caller
+
+            if newfunc in newstats:
+                newstats[newfunc] = add_func_stats(
+                                        newstats[newfunc],
+                                        (cc, nc, tt, ct, newcallers))
+            else:
+                newstats[newfunc] = (cc, nc, tt, ct, newcallers)
+        old_top = self.top_level
+        self.top_level = new_top = {}
+        for func in old_top:
+            new_top[func_strip_path(func)] = None
+
+        self.max_name_len = max_name_len
+
+        self.fcn_list = None
+        self.all_callees = None
+        return self
+
+    def calc_callees(self):
+        if self.all_callees: return
+        self.all_callees = all_callees = {}
+        for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
+            if not func in all_callees:
+                all_callees[func] = {}
+            for func2, caller in callers.iteritems():
+                if not func2 in all_callees:
+                    all_callees[func2] = {}
+                all_callees[func2][func]  = caller
+        return
+
+    #******************************************************************
+    # The following functions support actual printing of reports
+    #******************************************************************
+
+    # Optional "amount" is either a line count, or a percentage of lines.
+
+    def eval_print_amount(self, sel, list, msg):
+        new_list = list
+        if type(sel) == type(""):
+            new_list = []
+            for func in list:
+                if re.search(sel, func_std_string(func)):
+                    new_list.append(func)
+        else:
+            count = len(list)
+            if type(sel) == type(1.0) and 0.0 <= sel < 1.0:
+                count = int(count * sel + .5)
+                new_list = list[:count]
+            elif type(sel) == type(1) and 0 <= sel < count:
+                count = sel
+                new_list = list[:count]
+        if len(list) != len(new_list):
+            msg = msg + "   List reduced from %r to %r due to restriction <%r>\n" % (
+                         len(list), len(new_list), sel)
+
+        return new_list, msg
+
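+    # Illustrative sketch (not in the original source): the print methods pass
+    # their arguments through eval_print_amount() in order, so restrictions
+    # compose left to right.  '__init__' below is just an example regex.
+    #
+    #   s.print_stats(10)             # at most 10 entries
+    #   s.print_stats(.5)             # half of the selected entries
+    #   s.print_stats('__init__')     # only entries matching the regex
+    #   s.print_stats('__init__', 5)  # matching entries, then at most 5 of them
+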
+    def get_print_list(self, sel_list):
+        width = self.max_name_len
+        if self.fcn_list:
+            list = self.fcn_list[:]
+            msg = "   Ordered by: " + self.sort_type + '\n'
+        else:
+            list = self.stats.keys()
+            msg = "   Random listing order was used\n"
+
+        for selection in sel_list:
+            list, msg = self.eval_print_amount(selection, list, msg)
+
+        count = len(list)
+
+        if not list:
+            return 0, list
+        print >> self.stream, msg
+        if count < len(self.stats):
+            width = 0
+            for func in list:
+                if  len(func_std_string(func)) > width:
+                    width = len(func_std_string(func))
+        return width+2, list
+
+    def print_stats(self, *amount):
+        for filename in self.files:
+            print >> self.stream, filename
+        if self.files: print >> self.stream
+        indent = ' ' * 8
+        for func in self.top_level:
+            print >> self.stream, indent, func_get_function_name(func)
+
+        print >> self.stream, indent, self.total_calls, "function calls",
+        if self.total_calls != self.prim_calls:
+            print >> self.stream, "(%d primitive calls)" % self.prim_calls,
+        print >> self.stream, "in %.3f CPU seconds" % self.total_tt
+        print >> self.stream
+        width, list = self.get_print_list(amount)
+        if list:
+            self.print_title()
+            for func in list:
+                self.print_line(func)
+            print >> self.stream
+            print >> self.stream
+        return self
+
+    def print_callees(self, *amount):
+        width, list = self.get_print_list(amount)
+        if list:
+            self.calc_callees()
+
+            self.print_call_heading(width, "called...")
+            for func in list:
+                if func in self.all_callees:
+                    self.print_call_line(width, func, self.all_callees[func])
+                else:
+                    self.print_call_line(width, func, {})
+            print >> self.stream
+            print >> self.stream
+        return self
+
+    def print_callers(self, *amount):
+        width, list = self.get_print_list(amount)
+        if list:
+            self.print_call_heading(width, "was called by...")
+            for func in list:
+                cc, nc, tt, ct, callers = self.stats[func]
+                self.print_call_line(width, func, callers, "<-")
+            print >> self.stream
+            print >> self.stream
+        return self
+
+    def print_call_heading(self, name_size, column_title):
+        print >> self.stream, "Function ".ljust(name_size) + column_title
+        # print sub-header only if we have new-style callers
+        subheader = False
+        for cc, nc, tt, ct, callers in self.stats.itervalues():
+            if callers:
+                value = callers.itervalues().next()
+                subheader = isinstance(value, tuple)
+                break
+        if subheader:
+            print >> self.stream, " "*name_size + "    ncalls  tottime  cumtime"
+
+    def print_call_line(self, name_size, source, call_dict, arrow="->"):
+        print >> self.stream, func_std_string(source).ljust(name_size) + arrow,
+        if not call_dict:
+            print >> self.stream
+            return
+        clist = call_dict.keys()
+        clist.sort()
+        indent = ""
+        for func in clist:
+            name = func_std_string(func)
+            value = call_dict[func]
+            if isinstance(value, tuple):
+                nc, cc, tt, ct = value
+                if nc != cc:
+                    substats = '%d/%d' % (nc, cc)
+                else:
+                    substats = '%d' % (nc,)
+                substats = '%s %s %s  %s' % (substats.rjust(7+2*len(indent)),
+                                             f8(tt), f8(ct), name)
+                left_width = name_size + 1
+            else:
+                substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3]))
+                left_width = name_size + 3
+            print >> self.stream, indent*left_width + substats
+            indent = " "
+
+    def print_title(self):
+        print >> self.stream, '   ncalls  tottime  percall  cumtime  percall',
+        print >> self.stream, 'filename:lineno(function)'
+
+    def print_line(self, func):  # hack : should print percentages
+        cc, nc, tt, ct, callers = self.stats[func]
+        c = str(nc)
+        if nc != cc:
+            c = c + '/' + str(cc)
+        print >> self.stream, c.rjust(9),
+        print >> self.stream, f8(tt),
+        if nc == 0:
+            print >> self.stream, ' '*8,
+        else:
+            print >> self.stream, f8(tt/nc),
+        print >> self.stream, f8(ct),
+        if cc == 0:
+            print >> self.stream, ' '*8,
+        else:
+            print >> self.stream, f8(ct/cc),
+        print >> self.stream, func_std_string(func)
+
+class TupleComp:
+    """This class provides a generic function for comparing any two tuples.
+    Each instance records a list of tuple-indices (from most significant
+    to least significant), and sort direction (ascending or descending) for
+    each tuple-index.  The compare functions can then be used as the function
+    argument to the system sort() function when a list of tuples needs to be
+    sorted in the instance's order."""
+
+    def __init__(self, comp_select_list):
+        self.comp_select_list = comp_select_list
+
+    def compare (self, left, right):
+        for index, direction in self.comp_select_list:
+            l = left[index]
+            r = right[index]
+            if l < r:
+                return -direction
+            if l > r:
+                return direction
+        return 0
+
+#**************************************************************************
+# func_name is a triple (file:string, line:int, name:string)
+
+def func_strip_path(func_name):
+    filename, line, name = func_name
+    return os.path.basename(filename), line, name
+
+def func_get_function_name(func):
+    return func[2]
+
+def func_std_string(func_name): # match what old profile produced
+    if func_name[:2] == ('~', 0):
+        # special case for built-in functions
+        name = func_name[2]
+        if name.startswith('<') and name.endswith('>'):
+            return '{%s}' % name[1:-1]
+        else:
+            return name
+    else:
+        return "%s:%d(%s)" % func_name
+
+#**************************************************************************
+# The following functions combine statistics for pairs of functions.
+# The bulk of the processing involves correctly handling "call" lists,
+# such as callers and callees.
+#**************************************************************************
+
+def add_func_stats(target, source):
+    """Add together all the stats for two profile entries."""
+    cc, nc, tt, ct, callers = source
+    t_cc, t_nc, t_tt, t_ct, t_callers = target
+    return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct,
+              add_callers(t_callers, callers))
+
+def add_callers(target, source):
+    """Combine two caller lists in a single list."""
+    new_callers = {}
+    for func, caller in target.iteritems():
+        new_callers[func] = caller
+    for func, caller in source.iteritems():
+        if func in new_callers:
+            new_callers[func] = caller + new_callers[func]
+        else:
+            new_callers[func] = caller
+    return new_callers
+
+def count_calls(callers):
+    """Sum the caller statistics to get total number of calls received."""
+    nc = 0
+    for calls in callers.itervalues():
+        nc += calls
+    return nc
+
+#**************************************************************************
+# The following functions support printing of reports
+#**************************************************************************
+
+def f8(x):
+    return "%8.3f" % x
+
+#**************************************************************************
+# Statistics browser added by ESR, April 2001
+#**************************************************************************
+
+if __name__ == '__main__':
+    import cmd
+    try:
+        import readline
+    except ImportError:
+        pass
+
+    class ProfileBrowser(cmd.Cmd):
+        def __init__(self, profile=None):
+            cmd.Cmd.__init__(self)
+            self.prompt = "% "
+            if profile is not None:
+                self.stats = Stats(profile)
+                self.stream = self.stats.stream
+            else:
+                self.stats = None
+                self.stream = sys.stdout
+
+        def generic(self, fn, line):
+            args = line.split()
+            processed = []
+            for term in args:
+                try:
+                    processed.append(int(term))
+                    continue
+                except ValueError:
+                    pass
+                try:
+                    frac = float(term)
+                    if frac > 1 or frac < 0:
+                        print >> self.stream, "Fraction argument must be in [0, 1]"
+                        continue
+                    processed.append(frac)
+                    continue
+                except ValueError:
+                    pass
+                processed.append(term)
+            if self.stats:
+                getattr(self.stats, fn)(*processed)
+            else:
+                print >> self.stream, "No statistics object is loaded."
+            return 0
+        def generic_help(self):
+            print >> self.stream, "Arguments may be:"
+            print >> self.stream, "* An integer maximum number of entries to print."
+            print >> self.stream, "* A decimal fractional number between 0 and 1, controlling"
+            print >> self.stream, "  what fraction of selected entries to print."
+            print >> self.stream, "* A regular expression; only entries with function names"
+            print >> self.stream, "  that match it are printed."
+
+        def do_add(self, line):
+            self.stats.add(line)
+            return 0
+        def help_add(self):
+            print >> self.stream, "Add profile info from given file to current statistics object."
+
+        def do_callees(self, line):
+            return self.generic('print_callees', line)
+        def help_callees(self):
+            print >> self.stream, "Print callees statistics from the current stat object."
+            self.generic_help()
+
+        def do_callers(self, line):
+            return self.generic('print_callers', line)
+        def help_callers(self):
+            print >> self.stream, "Print callers statistics from the current stat object."
+            self.generic_help()
+
+        def do_EOF(self, line):
+            print >> self.stream, ""
+            return 1
+        def help_EOF(self):
+            print >> self.stream, "Leave the profile brower."
+
+        def do_quit(self, line):
+            return 1
+        def help_quit(self):
+            print >> self.stream, "Leave the profile brower."
+
+        def do_read(self, line):
+            if line:
+                try:
+                    self.stats = Stats(line)
+                except IOError, args:
+                    print >> self.stream, args[1]
+                    return
+                self.prompt = line + "% "
+            elif len(self.prompt) > 2:
+                line = self.prompt[-2:]
+            else:
+                print >> self.stream, "No statistics object is current -- cannot reload."
+            return 0
+        def help_read(self):
+            print >> self.stream, "Read in profile data from a specified file."
+
+        def do_reverse(self, line):
+            self.stats.reverse_order()
+            return 0
+        def help_reverse(self):
+            print >> self.stream, "Reverse the sort order of the profiling report."
+
+        def do_sort(self, line):
+            abbrevs = self.stats.get_sort_arg_defs()
+            if line and not filter(lambda x,a=abbrevs: x not in a,line.split()):
+                self.stats.sort_stats(*line.split())
+            else:
+                print >> self.stream, "Valid sort keys (unique prefixes are accepted):"
+                for (key, value) in Stats.sort_arg_dict_default.iteritems():
+                    print >> self.stream, "%s -- %s" % (key, value[1])
+            return 0
+        def help_sort(self):
+            print >> self.stream, "Sort profile data according to specified keys."
+            print >> self.stream, "(Typing `sort' without arguments lists valid keys.)"
+        def complete_sort(self, text, *args):
+            return [a for a in Stats.sort_arg_dict_default if a.startswith(text)]
+
+        def do_stats(self, line):
+            return self.generic('print_stats', line)
+        def help_stats(self):
+            print >> self.stream, "Print statistics from the current stat object."
+            self.generic_help()
+
+        def do_strip(self, line):
+            self.stats.strip_dirs()
+            return 0
+        def help_strip(self):
+            print >> self.stream, "Strip leading path information from filenames in the report."
+
+        def postcmd(self, stop, line):
+            if stop:
+                return stop
+            return None
+
+    import sys
+    if len(sys.argv) > 1:
+        initprofile = sys.argv[1]
+    else:
+        initprofile = None
+    try:
+        browser = ProfileBrowser(initprofile)
+        print >> browser.stream, "Welcome to the profile statistics browser."
+        browser.cmdloop()
+        print >> browser.stream, "Goodbye."
+    except KeyboardInterrupt:
+        pass
+
+# That's all, folks.


