[pypy-commit] pypy pytest-2.9.2: Add missing _pytest files

rlamy pypy.commits at gmail.com
Tue Nov 15 14:45:19 EST 2016


Author: Ronan Lamy <ronan.lamy at gmail.com>
Branch: pytest-2.9.2
Changeset: r88397:abcd2a32778c
Date: 2016-11-15 19:44 +0000
http://bitbucket.org/pypy/pypy/changeset/abcd2a32778c/

Log:	Add missing _pytest files

diff too long, truncating to 2000 out of 2510 lines

diff --git a/_pytest/_code/__init__.py b/_pytest/_code/__init__.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_code/__init__.py
@@ -0,0 +1,12 @@
+""" python inspection/code generation API """
+from .code import Code  # noqa
+from .code import ExceptionInfo  # noqa
+from .code import Frame  # noqa
+from .code import Traceback  # noqa
+from .code import getrawcode  # noqa
+from .code import patch_builtins  # noqa
+from .code import unpatch_builtins  # noqa
+from .source import Source  # noqa
+from .source import compile_ as compile  # noqa
+from .source import getfslineno  # noqa
+
diff --git a/_pytest/_code/_py2traceback.py b/_pytest/_code/_py2traceback.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_code/_py2traceback.py
@@ -0,0 +1,81 @@
+# copied from python-2.7.3's traceback.py
+# CHANGES:
+# - some_str is replaced, trying to create unicode strings
+#
+import types
+
+def format_exception_only(etype, value):
+    """Format the exception part of a traceback.
+
+    The arguments are the exception type and value such as given by
+    sys.last_type and sys.last_value. The return value is a list of
+    strings, each ending in a newline.
+
+    Normally, the list contains a single string; however, for
+    SyntaxError exceptions, it contains several lines that (when
+    printed) display detailed information about where the syntax
+    error occurred.
+
+    The message indicating which exception occurred is always the last
+    string in the list.
+
+    """
+
+    # An instance should not have a meaningful value parameter, but
+    # sometimes does, particularly for string exceptions, such as
+    # >>> raise string1, string2  # deprecated
+    #
+    # Clear these out first because issubtype(string1, SyntaxError)
+    # would throw another exception and mask the original problem.
+    if (isinstance(etype, BaseException) or
+        isinstance(etype, types.InstanceType) or
+        etype is None or type(etype) is str):
+        return [_format_final_exc_line(etype, value)]
+
+    stype = etype.__name__
+
+    if not issubclass(etype, SyntaxError):
+        return [_format_final_exc_line(stype, value)]
+
+    # It was a syntax error; show exactly where the problem was found.
+    lines = []
+    try:
+        msg, (filename, lineno, offset, badline) = value.args
+    except Exception:
+        pass
+    else:
+        filename = filename or "<string>"
+        lines.append('  File "%s", line %d\n' % (filename, lineno))
+        if badline is not None:
+            if isinstance(badline, bytes):  # python 2 only
+                badline = badline.decode('utf-8', 'replace')
+            lines.append(u'    %s\n' % badline.strip())
+            if offset is not None:
+                caretspace = badline.rstrip('\n')[:offset].lstrip()
+                # non-space whitespace (like tabs) must be kept for alignment
+                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
+                # only three spaces to account for offset1 == pos 0
+                lines.append('   %s^\n' % ''.join(caretspace))
+        value = msg
+
+    lines.append(_format_final_exc_line(stype, value))
+    return lines
+
+def _format_final_exc_line(etype, value):
+    """Return a list of a single line -- normal case for format_exception_only"""
+    valuestr = _some_str(value)
+    if value is None or not valuestr:
+        line = "%s\n" % etype
+    else:
+        line = "%s: %s\n" % (etype, valuestr)
+    return line
+
+def _some_str(value):
+    try:
+        return unicode(value)
+    except Exception:
+        try:
+            return str(value)
+        except Exception:
+            pass
+    return '<unprintable %s object>' % type(value).__name__
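
A minimal usage sketch of the helper above (not part of the patch); it assumes Python 2, since the module relies on `unicode` and `types.InstanceType`:

```
import sys
from _pytest._code._py2traceback import format_exception_only

try:
    int("not a number")
except ValueError:
    etype, value, _ = sys.exc_info()
    # returns a list of strings, each ending in a newline;
    # for most exceptions it is a single "ValueError: ..." line
    for line in format_exception_only(etype, value):
        sys.stderr.write(line)
```
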
diff --git a/_pytest/_code/code.py b/_pytest/_code/code.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_code/code.py
@@ -0,0 +1,805 @@
+import sys
+from inspect import CO_VARARGS, CO_VARKEYWORDS
+
+import py
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+if sys.version_info[0] >= 3:
+    from traceback import format_exception_only
+else:
+    from ._py2traceback import format_exception_only
+
+class Code(object):
+    """ wrapper around Python code objects """
+    def __init__(self, rawcode):
+        if not hasattr(rawcode, "co_filename"):
+            rawcode = getrawcode(rawcode)
+        try:
+            self.filename = rawcode.co_filename
+            self.firstlineno = rawcode.co_firstlineno - 1
+            self.name = rawcode.co_name
+        except AttributeError:
+            raise TypeError("not a code object: %r" %(rawcode,))
+        self.raw = rawcode
+
+    def __eq__(self, other):
+        return self.raw == other.raw
+
+    def __ne__(self, other):
+        return not self == other
+
+    @property
+    def path(self):
+        """ return a path object pointing to source code (note that it
+        might not point to an actually existing file). """
+        p = py.path.local(self.raw.co_filename)
+        # maybe don't try this checking
+        if not p.check():
+            # XXX maybe try harder like the weird logic
+            # in the standard lib [linecache.updatecache] does?
+            p = self.raw.co_filename
+        return p
+
+    @property
+    def fullsource(self):
+        """ return a _pytest._code.Source object for the full source file of the code
+        """
+        from _pytest._code import source
+        full, _ = source.findsource(self.raw)
+        return full
+
+    def source(self):
+        """ return a _pytest._code.Source object for the code object's source only
+        """
+        # return source only for that part of code
+        import _pytest._code
+        return _pytest._code.Source(self.raw)
+
+    def getargs(self, var=False):
+        """ return a tuple with the argument names for the code object
+
+            if 'var' is set True also return the names of the variable and
+            keyword arguments when present
+        """
+        # handy shortcut for getting args
+        raw = self.raw
+        argcount = raw.co_argcount
+        if var:
+            argcount += raw.co_flags & CO_VARARGS
+            argcount += raw.co_flags & CO_VARKEYWORDS
+        return raw.co_varnames[:argcount]
+
+class Frame(object):
+    """Wrapper around a Python frame holding f_locals and f_globals
+    in which expressions can be evaluated."""
+
+    def __init__(self, frame):
+        self.lineno = frame.f_lineno - 1
+        self.f_globals = frame.f_globals
+        self.f_locals = frame.f_locals
+        self.raw = frame
+        self.code = Code(frame.f_code)
+
+    @property
+    def statement(self):
+        """ statement this frame is at """
+        import _pytest._code
+        if self.code.fullsource is None:
+            return _pytest._code.Source("")
+        return self.code.fullsource.getstatement(self.lineno)
+
+    def eval(self, code, **vars):
+        """ evaluate 'code' in the frame
+
+            'vars' are optional additional local variables
+
+            returns the result of the evaluation
+        """
+        f_locals = self.f_locals.copy()
+        f_locals.update(vars)
+        return eval(code, self.f_globals, f_locals)
+
+    def exec_(self, code, **vars):
+        """ exec 'code' in the frame
+
+            'vars' are optional additional local variables
+        """
+        f_locals = self.f_locals.copy()
+        f_locals.update(vars)
+        py.builtin.exec_(code, self.f_globals, f_locals)
+
+    def repr(self, object):
+        """ return a 'safe' (non-recursive, one-line) string repr for 'object'
+        """
+        return py.io.saferepr(object)
+
+    def is_true(self, object):
+        return object
+
+    def getargs(self, var=False):
+        """ return a list of tuples (name, value) for all arguments
+
+            if 'var' is set True also include the variable and keyword
+            arguments when present
+        """
+        retval = []
+        for arg in self.code.getargs(var):
+            try:
+                retval.append((arg, self.f_locals[arg]))
+            except KeyError:
+                pass     # this can occur when using Psyco
+        return retval
+
+class TracebackEntry(object):
+    """ a single entry in a traceback """
+
+    _repr_style = None
+    exprinfo = None
+
+    def __init__(self, rawentry):
+        self._rawentry = rawentry
+        self.lineno = rawentry.tb_lineno - 1
+
+    def set_repr_style(self, mode):
+        assert mode in ("short", "long")
+        self._repr_style = mode
+
+    @property
+    def frame(self):
+        import _pytest._code
+        return _pytest._code.Frame(self._rawentry.tb_frame)
+
+    @property
+    def relline(self):
+        return self.lineno - self.frame.code.firstlineno
+
+    def __repr__(self):
+        return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
+
+    @property
+    def statement(self):
+        """ _pytest._code.Source object for the current statement """
+        source = self.frame.code.fullsource
+        return source.getstatement(self.lineno)
+
+    @property
+    def path(self):
+        """ path to the source code """
+        return self.frame.code.path
+
+    def getlocals(self):
+        return self.frame.f_locals
+    locals = property(getlocals, None, None, "locals of underlying frame")
+
+    def reinterpret(self):
+        """Reinterpret the failing statement and returns a detailed information
+           about what operations are performed."""
+        from _pytest.assertion.reinterpret import reinterpret
+        if self.exprinfo is None:
+            source = py.builtin._totext(self.statement).strip()
+            x = reinterpret(source, self.frame, should_fail=True)
+            if not py.builtin._istext(x):
+                raise TypeError("interpret returned non-string %r" % (x,))
+            self.exprinfo = x
+        return self.exprinfo
+
+    def getfirstlinesource(self):
+        # on Jython this firstlineno can be -1 apparently
+        return max(self.frame.code.firstlineno, 0)
+
+    def getsource(self, astcache=None):
+        """ return failing source code. """
+        # we use the passed in astcache to not reparse asttrees
+        # within exception info printing
+        from _pytest._code.source import getstatementrange_ast
+        source = self.frame.code.fullsource
+        if source is None:
+            return None
+        key = astnode = None
+        if astcache is not None:
+            key = self.frame.code.path
+            if key is not None:
+                astnode = astcache.get(key, None)
+        start = self.getfirstlinesource()
+        try:
+            astnode, _, end = getstatementrange_ast(self.lineno, source,
+                                                    astnode=astnode)
+        except SyntaxError:
+            end = self.lineno + 1
+        else:
+            if key is not None:
+                astcache[key] = astnode
+        return source[start:end]
+
+    source = property(getsource)
+
+    def ishidden(self):
+        """ return True if the current frame has a var __tracebackhide__
+            resolving to True
+
+            mostly for internal use
+        """
+        try:
+            return self.frame.f_locals['__tracebackhide__']
+        except KeyError:
+            try:
+                return self.frame.f_globals['__tracebackhide__']
+            except KeyError:
+                return False
+
+    def __str__(self):
+        try:
+            fn = str(self.path)
+        except py.error.Error:
+            fn = '???'
+        name = self.frame.code.name
+        try:
+            line = str(self.statement).lstrip()
+        except KeyboardInterrupt:
+            raise
+        except:
+            line = "???"
+        return "  File %r:%d in %s\n  %s\n" %(fn, self.lineno+1, name, line)
+
+    def name(self):
+        return self.frame.code.raw.co_name
+    name = property(name, None, None, "co_name of underlying code")
+
+class Traceback(list):
+    """ Traceback objects encapsulate and offer higher level
+        access to Traceback entries.
+    """
+    Entry = TracebackEntry
+    def __init__(self, tb):
+        """ initialize from given python traceback object. """
+        if hasattr(tb, 'tb_next'):
+            def f(cur):
+                while cur is not None:
+                    yield self.Entry(cur)
+                    cur = cur.tb_next
+            list.__init__(self, f(tb))
+        else:
+            list.__init__(self, tb)
+
+    def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
+        """ return a Traceback instance wrapping part of this Traceback
+
+            by providing any combination of path, lineno and firstlineno, the
+            first frame to start the to-be-returned traceback is determined
+
+            this allows cutting the first part of a Traceback instance e.g.
+            for formatting reasons (removing some uninteresting bits that deal
+            with handling of the exception/traceback)
+        """
+        for x in self:
+            code = x.frame.code
+            codepath = code.path
+            if ((path is None or codepath == path) and
+                (excludepath is None or not hasattr(codepath, 'relto') or
+                 not codepath.relto(excludepath)) and
+                (lineno is None or x.lineno == lineno) and
+                (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
+                return Traceback(x._rawentry)
+        return self
+
+    def __getitem__(self, key):
+        val = super(Traceback, self).__getitem__(key)
+        if isinstance(key, type(slice(0))):
+            val = self.__class__(val)
+        return val
+
+    def filter(self, fn=lambda x: not x.ishidden()):
+        """ return a Traceback instance with certain items removed
+
+            fn is a function that gets a single argument, a TracebackEntry
+            instance, and should return True when the item should be added
+            to the Traceback, False when not
+
+            by default this removes all the TracebackEntries which are hidden
+            (see ishidden() above)
+        """
+        return Traceback(filter(fn, self))
+
+    def getcrashentry(self):
+        """ return last non-hidden traceback entry that lead
+        to the exception of a traceback.
+        """
+        for i in range(-1, -len(self)-1, -1):
+            entry = self[i]
+            if not entry.ishidden():
+                return entry
+        return self[-1]
+
+    def recursionindex(self):
+        """ return the index of the frame/TracebackEntry where recursion
+            originates if appropriate, None if no recursion occurred
+        """
+        cache = {}
+        for i, entry in enumerate(self):
+            # id for the code.raw is needed to work around
+            # the strange metaprogramming in the decorator lib from pypi
+            # which generates code objects that have hash/value equality
+            #XXX needs a test
+            key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
+            #print "checking for recursion at", key
+            l = cache.setdefault(key, [])
+            if l:
+                f = entry.frame
+                loc = f.f_locals
+                for otherloc in l:
+                    if f.is_true(f.eval(co_equal,
+                        __recursioncache_locals_1=loc,
+                        __recursioncache_locals_2=otherloc)):
+                        return i
+            l.append(entry.frame.f_locals)
+        return None
+
+co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
+                   '?', 'eval')
+
+class ExceptionInfo(object):
+    """ wraps sys.exc_info() objects and offers
+        help for navigating the traceback.
+    """
+    _striptext = ''
+    def __init__(self, tup=None, exprinfo=None):
+        import _pytest._code
+        if tup is None:
+            tup = sys.exc_info()
+            if exprinfo is None and isinstance(tup[1], AssertionError):
+                exprinfo = getattr(tup[1], 'msg', None)
+                if exprinfo is None:
+                    exprinfo = str(tup[1])
+                if exprinfo and exprinfo.startswith('assert '):
+                    self._striptext = 'AssertionError: '
+        self._excinfo = tup
+        #: the exception class
+        self.type = tup[0]
+        #: the exception instance
+        self.value = tup[1]
+        #: the exception raw traceback
+        self.tb = tup[2]
+        #: the exception type name
+        self.typename = self.type.__name__
+        #: the exception traceback (_pytest._code.Traceback instance)
+        self.traceback = _pytest._code.Traceback(self.tb)
+
+    def __repr__(self):
+        return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
+
+    def exconly(self, tryshort=False):
+        """ return the exception as a string
+
+            when 'tryshort' resolves to True, and the exception is a
+            _pytest._code._AssertionError, only the actual exception part of
+            the exception representation is returned (so 'AssertionError: ' is
+            removed from the beginning)
+        """
+        lines = format_exception_only(self.type, self.value)
+        text = ''.join(lines)
+        text = text.rstrip()
+        if tryshort:
+            if text.startswith(self._striptext):
+                text = text[len(self._striptext):]
+        return text
+
+    def errisinstance(self, exc):
+        """ return True if the exception is an instance of exc """
+        return isinstance(self.value, exc)
+
+    def _getreprcrash(self):
+        exconly = self.exconly(tryshort=True)
+        entry = self.traceback.getcrashentry()
+        path, lineno = entry.frame.code.raw.co_filename, entry.lineno
+        return ReprFileLocation(path, lineno+1, exconly)
+
+    def getrepr(self, showlocals=False, style="long",
+            abspath=False, tbfilter=True, funcargs=False):
+        """ return str()able representation of this exception info.
+            showlocals: show locals per traceback entry
+            style: long|short|no|native traceback style
+            tbfilter: hide entries (where __tracebackhide__ is true)
+
+            in case of style==native, tbfilter and showlocals are ignored.
+        """
+        if style == 'native':
+            return ReprExceptionInfo(ReprTracebackNative(
+                py.std.traceback.format_exception(
+                    self.type,
+                    self.value,
+                    self.traceback[0]._rawentry,
+                )), self._getreprcrash())
+
+        fmt = FormattedExcinfo(showlocals=showlocals, style=style,
+            abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
+        return fmt.repr_excinfo(self)
+
+    def __str__(self):
+        entry = self.traceback[-1]
+        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+        return str(loc)
+
+    def __unicode__(self):
+        entry = self.traceback[-1]
+        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+        return unicode(loc)
+
+
+class FormattedExcinfo(object):
+    """ presenting information about failing Functions and Generators. """
+    # for traceback entries
+    flow_marker = ">"
+    fail_marker = "E"
+
+    def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
+        self.showlocals = showlocals
+        self.style = style
+        self.tbfilter = tbfilter
+        self.funcargs = funcargs
+        self.abspath = abspath
+        self.astcache = {}
+
+    def _getindent(self, source):
+        # figure out indent for given source
+        try:
+            s = str(source.getstatement(len(source)-1))
+        except KeyboardInterrupt:
+            raise
+        except:
+            try:
+                s = str(source[-1])
+            except KeyboardInterrupt:
+                raise
+            except:
+                return 0
+        return 4 + (len(s) - len(s.lstrip()))
+
+    def _getentrysource(self, entry):
+        source = entry.getsource(self.astcache)
+        if source is not None:
+            source = source.deindent()
+        return source
+
+    def _saferepr(self, obj):
+        return py.io.saferepr(obj)
+
+    def repr_args(self, entry):
+        if self.funcargs:
+            args = []
+            for argname, argvalue in entry.frame.getargs(var=True):
+                args.append((argname, self._saferepr(argvalue)))
+            return ReprFuncArgs(args)
+
+    def get_source(self, source, line_index=-1, excinfo=None, short=False):
+        """ return formatted and marked up source lines. """
+        import _pytest._code
+        lines = []
+        if source is None or line_index >= len(source.lines):
+            source = _pytest._code.Source("???")
+            line_index = 0
+        if line_index < 0:
+            line_index += len(source)
+        space_prefix = "    "
+        if short:
+            lines.append(space_prefix + source.lines[line_index].strip())
+        else:
+            for line in source.lines[:line_index]:
+                lines.append(space_prefix + line)
+            lines.append(self.flow_marker + "   " + source.lines[line_index])
+            for line in source.lines[line_index+1:]:
+                lines.append(space_prefix + line)
+        if excinfo is not None:
+            indent = 4 if short else self._getindent(source)
+            lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
+        return lines
+
+    def get_exconly(self, excinfo, indent=4, markall=False):
+        lines = []
+        indent = " " * indent
+        # get the real exception information out
+        exlines = excinfo.exconly(tryshort=True).split('\n')
+        failindent = self.fail_marker + indent[1:]
+        for line in exlines:
+            lines.append(failindent + line)
+            if not markall:
+                failindent = indent
+        return lines
+
+    def repr_locals(self, locals):
+        if self.showlocals:
+            lines = []
+            keys = [loc for loc in locals if loc[0] != "@"]
+            keys.sort()
+            for name in keys:
+                value = locals[name]
+                if name == '__builtins__':
+                    lines.append("__builtins__ = <builtins>")
+                else:
+                    # This formatting could all be handled by the
+                    # _repr() function, which is only reprlib.Repr in
+                    # disguise, so is very configurable.
+                    str_repr = self._saferepr(value)
+                    #if len(str_repr) < 70 or not isinstance(value,
+                    #                            (list, tuple, dict)):
+                    lines.append("%-10s = %s" %(name, str_repr))
+                    #else:
+                    #    self._line("%-10s =\\" % (name,))
+                    #    # XXX
+                    #    py.std.pprint.pprint(value, stream=self.excinfowriter)
+            return ReprLocals(lines)
+
+    def repr_traceback_entry(self, entry, excinfo=None):
+        import _pytest._code
+        source = self._getentrysource(entry)
+        if source is None:
+            source = _pytest._code.Source("???")
+            line_index = 0
+        else:
+            # entry.getfirstlinesource() can be -1, should be 0 on jython
+            line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
+
+        lines = []
+        style = entry._repr_style
+        if style is None:
+            style = self.style
+        if style in ("short", "long"):
+            short = style == "short"
+            reprargs = self.repr_args(entry) if not short else None
+            s = self.get_source(source, line_index, excinfo, short=short)
+            lines.extend(s)
+            if short:
+                message = "in %s" %(entry.name)
+            else:
+                message = excinfo and excinfo.typename or ""
+            path = self._makepath(entry.path)
+            filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
+            localsrepr = None
+            if not short:
+                localsrepr =  self.repr_locals(entry.locals)
+            return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
+        if excinfo:
+            lines.extend(self.get_exconly(excinfo, indent=4))
+        return ReprEntry(lines, None, None, None, style)
+
+    def _makepath(self, path):
+        if not self.abspath:
+            try:
+                np = py.path.local().bestrelpath(path)
+            except OSError:
+                return path
+            if len(np) < len(str(path)):
+                path = np
+        return path
+
+    def repr_traceback(self, excinfo):
+        traceback = excinfo.traceback
+        if self.tbfilter:
+            traceback = traceback.filter()
+        recursionindex = None
+        if is_recursion_error(excinfo):
+            recursionindex = traceback.recursionindex()
+        last = traceback[-1]
+        entries = []
+        extraline = None
+        for index, entry in enumerate(traceback):
+            einfo = (last == entry) and excinfo or None
+            reprentry = self.repr_traceback_entry(entry, einfo)
+            entries.append(reprentry)
+            if index == recursionindex:
+                extraline = "!!! Recursion detected (same locals & position)"
+                break
+        return ReprTraceback(entries, extraline, style=self.style)
+
+    def repr_excinfo(self, excinfo):
+        reprtraceback = self.repr_traceback(excinfo)
+        reprcrash = excinfo._getreprcrash()
+        return ReprExceptionInfo(reprtraceback, reprcrash)
+
+class TerminalRepr:
+    def __str__(self):
+        s = self.__unicode__()
+        if sys.version_info[0] < 3:
+            s = s.encode('utf-8')
+        return s
+
+    def __unicode__(self):
+        # FYI this is called from pytest-xdist's serialization of exception
+        # information.
+        io = py.io.TextIO()
+        tw = py.io.TerminalWriter(file=io)
+        self.toterminal(tw)
+        return io.getvalue().strip()
+
+    def __repr__(self):
+        return "<%s instance at %0x>" %(self.__class__, id(self))
+
+
+class ReprExceptionInfo(TerminalRepr):
+    def __init__(self, reprtraceback, reprcrash):
+        self.reprtraceback = reprtraceback
+        self.reprcrash = reprcrash
+        self.sections = []
+
+    def addsection(self, name, content, sep="-"):
+        self.sections.append((name, content, sep))
+
+    def toterminal(self, tw):
+        self.reprtraceback.toterminal(tw)
+        for name, content, sep in self.sections:
+            tw.sep(sep, name)
+            tw.line(content)
+
+class ReprTraceback(TerminalRepr):
+    entrysep = "_ "
+
+    def __init__(self, reprentries, extraline, style):
+        self.reprentries = reprentries
+        self.extraline = extraline
+        self.style = style
+
+    def toterminal(self, tw):
+        # the entries might have different styles
+        for i, entry in enumerate(self.reprentries):
+            if entry.style == "long":
+                tw.line("")
+            entry.toterminal(tw)
+            if i < len(self.reprentries) - 1:
+                next_entry = self.reprentries[i+1]
+                if entry.style == "long" or \
+                   entry.style == "short" and next_entry.style == "long":
+                    tw.sep(self.entrysep)
+
+        if self.extraline:
+            tw.line(self.extraline)
+
+class ReprTracebackNative(ReprTraceback):
+    def __init__(self, tblines):
+        self.style = "native"
+        self.reprentries = [ReprEntryNative(tblines)]
+        self.extraline = None
+
+class ReprEntryNative(TerminalRepr):
+    style = "native"
+
+    def __init__(self, tblines):
+        self.lines = tblines
+
+    def toterminal(self, tw):
+        tw.write("".join(self.lines))
+
+class ReprEntry(TerminalRepr):
+    localssep = "_ "
+
+    def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
+        self.lines = lines
+        self.reprfuncargs = reprfuncargs
+        self.reprlocals = reprlocals
+        self.reprfileloc = filelocrepr
+        self.style = style
+
+    def toterminal(self, tw):
+        if self.style == "short":
+            self.reprfileloc.toterminal(tw)
+            for line in self.lines:
+                red = line.startswith("E   ")
+                tw.line(line, bold=True, red=red)
+            #tw.line("")
+            return
+        if self.reprfuncargs:
+            self.reprfuncargs.toterminal(tw)
+        for line in self.lines:
+            red = line.startswith("E   ")
+            tw.line(line, bold=True, red=red)
+        if self.reprlocals:
+            #tw.sep(self.localssep, "Locals")
+            tw.line("")
+            self.reprlocals.toterminal(tw)
+        if self.reprfileloc:
+            if self.lines:
+                tw.line("")
+            self.reprfileloc.toterminal(tw)
+
+    def __str__(self):
+        return "%s\n%s\n%s" % ("\n".join(self.lines),
+                               self.reprlocals,
+                               self.reprfileloc)
+
+class ReprFileLocation(TerminalRepr):
+    def __init__(self, path, lineno, message):
+        self.path = str(path)
+        self.lineno = lineno
+        self.message = message
+
+    def toterminal(self, tw):
+        # filename and lineno output for each entry,
+        # using an output format that most editors understand
+        msg = self.message
+        i = msg.find("\n")
+        if i != -1:
+            msg = msg[:i]
+        tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
+
+class ReprLocals(TerminalRepr):
+    def __init__(self, lines):
+        self.lines = lines
+
+    def toterminal(self, tw):
+        for line in self.lines:
+            tw.line(line)
+
+class ReprFuncArgs(TerminalRepr):
+    def __init__(self, args):
+        self.args = args
+
+    def toterminal(self, tw):
+        if self.args:
+            linesofar = ""
+            for name, value in self.args:
+                ns = "%s = %s" %(name, value)
+                if len(ns) + len(linesofar) + 2 > tw.fullwidth:
+                    if linesofar:
+                        tw.line(linesofar)
+                    linesofar =  ns
+                else:
+                    if linesofar:
+                        linesofar += ", " + ns
+                    else:
+                        linesofar = ns
+            if linesofar:
+                tw.line(linesofar)
+            tw.line("")
+
+
+
+oldbuiltins = {}
+
+def patch_builtins(assertion=True, compile=True):
+    """ put compile and AssertionError builtins to Python's builtins. """
+    if assertion:
+        from _pytest.assertion import reinterpret
+        l = oldbuiltins.setdefault('AssertionError', [])
+        l.append(py.builtin.builtins.AssertionError)
+        py.builtin.builtins.AssertionError = reinterpret.AssertionError
+    if compile:
+        import _pytest._code
+        l = oldbuiltins.setdefault('compile', [])
+        l.append(py.builtin.builtins.compile)
+        py.builtin.builtins.compile = _pytest._code.compile
+
+def unpatch_builtins(assertion=True, compile=True):
+    """ remove compile and AssertionError builtins from Python builtins. """
+    if assertion:
+        py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
+    if compile:
+        py.builtin.builtins.compile = oldbuiltins['compile'].pop()
+
+def getrawcode(obj, trycall=True):
+    """ return code object for given function. """
+    try:
+        return obj.__code__
+    except AttributeError:
+        obj = getattr(obj, 'im_func', obj)
+        obj = getattr(obj, 'func_code', obj)
+        obj = getattr(obj, 'f_code', obj)
+        obj = getattr(obj, '__code__', obj)
+        if trycall and not hasattr(obj, 'co_firstlineno'):
+            if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
+                x = getrawcode(obj.__call__, trycall=False)
+                if hasattr(x, 'co_firstlineno'):
+                    return x
+        return obj
+
+if sys.version_info[:2] >= (3, 5):  # RecursionError introduced in 3.5
+    def is_recursion_error(excinfo):
+        return excinfo.errisinstance(RecursionError)  # noqa
+else:
+    def is_recursion_error(excinfo):
+        if not excinfo.errisinstance(RuntimeError):
+            return False
+        try:
+            return "maximum recursion depth exceeded" in str(excinfo.value)
+        except UnicodeError:
+            return False
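
A rough usage sketch (not from the patch) of the ExceptionInfo wrapper added above; the exact rendered output depends on the terminal writer, so treat it as illustrative only:

```
import _pytest._code

def divide(a, b):
    return a / b

try:
    divide(1, 0)
except ZeroDivisionError:
    # with no arguments, ExceptionInfo() wraps the current sys.exc_info()
    excinfo = _pytest._code.ExceptionInfo()
    print(excinfo.typename)           # ZeroDivisionError
    print(excinfo.exconly())          # "ZeroDivisionError: ..."
    print(len(excinfo.traceback))     # number of traceback entries
    # render a pytest-style "long" traceback representation
    print(str(excinfo.getrepr(style="long")))
```
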
diff --git a/_pytest/_code/source.py b/_pytest/_code/source.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_code/source.py
@@ -0,0 +1,421 @@
+from __future__ import generators
+
+from bisect import bisect_right
+import sys
+import inspect, tokenize
+import py
+from types import ModuleType
+cpy_compile = compile
+
+try:
+    import _ast
+    from _ast import PyCF_ONLY_AST as _AST_FLAG
+except ImportError:
+    _AST_FLAG = 0
+    _ast = None
+
+
+class Source(object):
+    """ a immutable object holding a source code fragment,
+        possibly deindenting it.
+    """
+    _compilecounter = 0
+    def __init__(self, *parts, **kwargs):
+        self.lines = lines = []
+        de = kwargs.get('deindent', True)
+        rstrip = kwargs.get('rstrip', True)
+        for part in parts:
+            if not part:
+                partlines = []
+            if isinstance(part, Source):
+                partlines = part.lines
+            elif isinstance(part, (tuple, list)):
+                partlines = [x.rstrip("\n") for x in part]
+            elif isinstance(part, py.builtin._basestring):
+                partlines = part.split('\n')
+                if rstrip:
+                    while partlines:
+                        if partlines[-1].strip():
+                            break
+                        partlines.pop()
+            else:
+                partlines = getsource(part, deindent=de).lines
+            if de:
+                partlines = deindent(partlines)
+            lines.extend(partlines)
+
+    def __eq__(self, other):
+        try:
+            return self.lines == other.lines
+        except AttributeError:
+            if isinstance(other, str):
+                return str(self) == other
+            return False
+
+    def __getitem__(self, key):
+        if isinstance(key, int):
+            return self.lines[key]
+        else:
+            if key.step not in (None, 1):
+                raise IndexError("cannot slice a Source with a step")
+            return self.__getslice__(key.start, key.stop)
+
+    def __len__(self):
+        return len(self.lines)
+
+    def __getslice__(self, start, end):
+        newsource = Source()
+        newsource.lines = self.lines[start:end]
+        return newsource
+
+    def strip(self):
+        """ return new source object with trailing
+            and leading blank lines removed.
+        """
+        start, end = 0, len(self)
+        while start < end and not self.lines[start].strip():
+            start += 1
+        while end > start and not self.lines[end-1].strip():
+            end -= 1
+        source = Source()
+        source.lines[:] = self.lines[start:end]
+        return source
+
+    def putaround(self, before='', after='', indent=' ' * 4):
+        """ return a copy of the source object with
+            'before' and 'after' wrapped around it.
+        """
+        before = Source(before)
+        after = Source(after)
+        newsource = Source()
+        lines = [ (indent + line) for line in self.lines]
+        newsource.lines = before.lines + lines +  after.lines
+        return newsource
+
+    def indent(self, indent=' ' * 4):
+        """ return a copy of the source object with
+            all lines indented by the given indent-string.
+        """
+        newsource = Source()
+        newsource.lines = [(indent+line) for line in self.lines]
+        return newsource
+
+    def getstatement(self, lineno, assertion=False):
+        """ return Source statement which contains the
+            given linenumber (counted from 0).
+        """
+        start, end = self.getstatementrange(lineno, assertion)
+        return self[start:end]
+
+    def getstatementrange(self, lineno, assertion=False):
+        """ return (start, end) tuple which spans the minimal
+            statement region containing the given lineno.
+        """
+        if not (0 <= lineno < len(self)):
+            raise IndexError("lineno out of range")
+        ast, start, end = getstatementrange_ast(lineno, self)
+        return start, end
+
+    def deindent(self, offset=None):
+        """ return a new source object deindented by offset.
+            If offset is None then guess an indentation offset from
+            the first non-blank line.  Subsequent lines which have a
+            lower indentation offset will be copied verbatim as
+            they are assumed to be part of multilines.
+        """
+        # XXX maybe use the tokenizer to properly handle multiline
+        #     strings etc.pp?
+        newsource = Source()
+        newsource.lines[:] = deindent(self.lines, offset)
+        return newsource
+
+    def isparseable(self, deindent=True):
+        """ return True if source is parseable, heuristically
+            deindenting it by default.
+        """
+        try:
+            import parser
+        except ImportError:
+            syntax_checker = lambda x: compile(x, 'asd', 'exec')
+        else:
+            syntax_checker = parser.suite
+
+        if deindent:
+            source = str(self.deindent())
+        else:
+            source = str(self)
+        try:
+            #compile(source+'\n', "x", "exec")
+            syntax_checker(source+'\n')
+        except KeyboardInterrupt:
+            raise
+        except Exception:
+            return False
+        else:
+            return True
+
+    def __str__(self):
+        return "\n".join(self.lines)
+
+    def compile(self, filename=None, mode='exec',
+                flag=generators.compiler_flag,
+                dont_inherit=0, _genframe=None):
+        """ return compiled code object. if filename is None
+            invent an artificial filename which displays
+            the source/line position of the caller frame.
+        """
+        if not filename or py.path.local(filename).check(file=0):
+            if _genframe is None:
+                _genframe = sys._getframe(1) # the caller
+            fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
+            base = "<%d-codegen " % self._compilecounter
+            self.__class__._compilecounter += 1
+            if not filename:
+                filename = base + '%s:%d>' % (fn, lineno)
+            else:
+                filename = base + '%r %s:%d>' % (filename, fn, lineno)
+        source = "\n".join(self.lines) + '\n'
+        try:
+            co = cpy_compile(source, filename, mode, flag)
+        except SyntaxError:
+            ex = sys.exc_info()[1]
+            # re-represent syntax errors from parsing python strings
+            msglines = self.lines[:ex.lineno]
+            if ex.offset:
+                msglines.append(" "*ex.offset + '^')
+            msglines.append("(code was compiled probably from here: %s)" % filename)
+            newex = SyntaxError('\n'.join(msglines))
+            newex.offset = ex.offset
+            newex.lineno = ex.lineno
+            newex.text = ex.text
+            raise newex
+        else:
+            if flag & _AST_FLAG:
+                return co
+            lines = [(x + "\n") for x in self.lines]
+            if sys.version_info[0] >= 3:
+                # XXX py3's inspect.getsourcefile() checks for a module
+                # and a pep302 __loader__ ... we don't have a module
+                # at code compile-time so we need to fake it here
+                m = ModuleType("_pycodecompile_pseudo_module")
+                py.std.inspect.modulesbyfile[filename] = None
+                py.std.sys.modules[None] = m
+                m.__loader__ = 1
+            py.std.linecache.cache[filename] = (1, None, lines, filename)
+            return co
+
+#
+# public API shortcut functions
+#
+
+def compile_(source, filename=None, mode='exec', flags=
+            generators.compiler_flag, dont_inherit=0):
+    """ compile the given source to a raw code object,
+        and maintain an internal cache which allows later
+        retrieval of the source code for the code object
+        and any recursively created code objects.
+    """
+    if _ast is not None and isinstance(source, _ast.AST):
+        # XXX should Source support having AST?
+        return cpy_compile(source, filename, mode, flags, dont_inherit)
+    _genframe = sys._getframe(1) # the caller
+    s = Source(source)
+    co = s.compile(filename, mode, flags, _genframe=_genframe)
+    return co
+
+
+def getfslineno(obj):
+    """ Return source location (path, lineno) for the given object.
+    If the source cannot be determined return ("", -1)
+    """
+    import _pytest._code
+    try:
+        code = _pytest._code.Code(obj)
+    except TypeError:
+        try:
+            fn = (py.std.inspect.getsourcefile(obj) or
+                  py.std.inspect.getfile(obj))
+        except TypeError:
+            return "", -1
+
+        fspath = fn and py.path.local(fn) or None
+        lineno = -1
+        if fspath:
+            try:
+                _, lineno = findsource(obj)
+            except IOError:
+                pass
+    else:
+        fspath = code.path
+        lineno = code.firstlineno
+    assert isinstance(lineno, int)
+    return fspath, lineno
+
+#
+# helper functions
+#
+
+def findsource(obj):
+    try:
+        sourcelines, lineno = py.std.inspect.findsource(obj)
+    except py.builtin._sysex:
+        raise
+    except:
+        return None, -1
+    source = Source()
+    source.lines = [line.rstrip() for line in sourcelines]
+    return source, lineno
+
+def getsource(obj, **kwargs):
+    import _pytest._code
+    obj = _pytest._code.getrawcode(obj)
+    try:
+        strsrc = inspect.getsource(obj)
+    except IndentationError:
+        strsrc = "\"Buggy python version consider upgrading, cannot get source\""
+    assert isinstance(strsrc, str)
+    return Source(strsrc, **kwargs)
+
+def deindent(lines, offset=None):
+    if offset is None:
+        for line in lines:
+            line = line.expandtabs()
+            s = line.lstrip()
+            if s:
+                offset = len(line)-len(s)
+                break
+        else:
+            offset = 0
+    if offset == 0:
+        return list(lines)
+    newlines = []
+    def readline_generator(lines):
+        for line in lines:
+            yield line + '\n'
+        while True:
+            yield ''
+
+    it = readline_generator(lines)
+
+    try:
+        for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
+            if sline > len(lines):
+                break # End of input reached
+            if sline > len(newlines):
+                line = lines[sline - 1].expandtabs()
+                if line.lstrip() and line[:offset].isspace():
+                    line = line[offset:] # Deindent
+                newlines.append(line)
+
+            for i in range(sline, eline):
+                # Don't deindent continuing lines of
+                # multiline tokens (i.e. multiline strings)
+                newlines.append(lines[i])
+    except (IndentationError, tokenize.TokenError):
+        pass
+    # Add any lines we didn't see. E.g. if an exception was raised.
+    newlines.extend(lines[len(newlines):])
+    return newlines
+
+
+def get_statement_startend2(lineno, node):
+    import ast
+    # flatten all statements and except handlers into one lineno-list
+    # AST's line numbers start indexing at 1
+    l = []
+    for x in ast.walk(node):
+        if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
+            l.append(x.lineno - 1)
+            for name in "finalbody", "orelse":
+                val = getattr(x, name, None)
+                if val:
+                    # treat the finally/orelse part as its own statement
+                    l.append(val[0].lineno - 1 - 1)
+    l.sort()
+    insert_index = bisect_right(l, lineno)
+    start = l[insert_index - 1]
+    if insert_index >= len(l):
+        end = None
+    else:
+        end = l[insert_index]
+    return start, end
+
+
+def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
+    if astnode is None:
+        content = str(source)
+        if sys.version_info < (2,7):
+            content += "\n"
+        try:
+            astnode = compile(content, "source", "exec", 1024)  # 1024 for AST
+        except ValueError:
+            start, end = getstatementrange_old(lineno, source, assertion)
+            return None, start, end
+    start, end = get_statement_startend2(lineno, astnode)
+    # we need to correct the end:
+    # - ast-parsing strips comments
+    # - there might be empty lines
+    # - we might have lesser indented code blocks at the end
+    if end is None:
+        end = len(source.lines)
+
+    if end > start + 1:
+        # make sure we don't span differently indented code blocks
+        # by using the BlockFinder helper which inspect.getsource() itself uses
+        block_finder = inspect.BlockFinder()
+        # if we start with an indented line, put blockfinder to "started" mode
+        block_finder.started = source.lines[start][0].isspace()
+        it = ((x + "\n") for x in source.lines[start:end])
+        try:
+            for tok in tokenize.generate_tokens(lambda: next(it)):
+                block_finder.tokeneater(*tok)
+        except (inspect.EndOfBlock, IndentationError):
+            end = block_finder.last + start
+        except Exception:
+            pass
+
+    # the end might still point to a comment or empty line, correct it
+    while end:
+        line = source.lines[end - 1].lstrip()
+        if line.startswith("#") or not line:
+            end -= 1
+        else:
+            break
+    return astnode, start, end
+
+
+def getstatementrange_old(lineno, source, assertion=False):
+    """ return (start, end) tuple which spans the minimal
+        statement region containing the given lineno.
+        raise an IndexError if no such statementrange can be found.
+    """
+    # XXX this logic is only used on python2.4 and below
+    # 1. find the start of the statement
+    from codeop import compile_command
+    for start in range(lineno, -1, -1):
+        if assertion:
+            line = source.lines[start]
+            # the following lines are not fully tested, change with care
+            if 'super' in line and 'self' in line and '__init__' in line:
+                raise IndexError("likely a subclass")
+            if "assert" not in line and "raise" not in line:
+                continue
+        trylines = source.lines[start:lineno+1]
+        # quick hack to prepare parsing an indented line with
+        # compile_command() (which errors on "return" outside defs)
+        trylines.insert(0, 'def xxx():')
+        trysource = '\n '.join(trylines)
+        #              ^ space here
+        try:
+            compile_command(trysource)
+        except (SyntaxError, OverflowError, ValueError):
+            continue
+
+        # 2. find the end of the statement
+        for end in range(lineno+1, len(source)+1):
+            trysource = source[start:end]
+            if trysource.isparseable():
+                return start, end
+    raise SyntaxError("no valid source range around line %d " % (lineno,))
+
+
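
A short sketch (not in the patch) of the Source API added above; the sample function is made up for illustration:

```
from _pytest._code import Source

src = Source("""
    def greet(name):
        msg = "hello %s" % name
        return msg
""")
# the constructor strips trailing blank lines and deindents by default
print(str(src))
# getstatement() returns the full statement containing a 0-based line number
print(str(src.getstatement(2)))   # -> '    msg = "hello %s" % name'
# slicing returns a new Source object rather than a plain list
print(src[1:3].lines)
```
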
diff --git a/_pytest/_pluggy.py b/_pytest/_pluggy.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_pluggy.py
@@ -0,0 +1,11 @@
+"""
+imports symbols from vendored "pluggy" if available, otherwise
+falls back to importing "pluggy" from the default namespace.
+"""
+
+try:
+    from _pytest.vendored_packages.pluggy import *  # noqa
+    from _pytest.vendored_packages.pluggy import __version__  # noqa
+except ImportError:
+    from pluggy import *  # noqa
+    from pluggy import __version__  # noqa
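
For illustration only (not in the commit): downstream code can import pluggy's public names through this shim regardless of whether the vendored copy is present. The project and hook names below are invented; the marker and manager classes are part of the pluggy 0.3.x API:

```
from _pytest._pluggy import HookspecMarker, HookimplMarker, PluginManager

hookspec = HookspecMarker("myproject")
hookimpl = HookimplMarker("myproject")

class Spec:
    @hookspec
    def myhook(self, arg):
        """hook specification: return something based on arg"""

class Plugin:
    @hookimpl
    def myhook(self, arg):
        return arg + 1

pm = PluginManager("myproject")
pm.add_hookspecs(Spec)
pm.register(Plugin())
print(pm.hook.myhook(arg=41))   # -> [42]
```
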
diff --git a/_pytest/cacheprovider.py b/_pytest/cacheprovider.py
new file mode 100755
--- /dev/null
+++ b/_pytest/cacheprovider.py
@@ -0,0 +1,245 @@
+"""
+merged implementation of the cache provider
+
+the name cache was not chosen to ensure pluggy automatically
+ignores the external pytest-cache
+"""
+
+import py
+import pytest
+import json
+from os.path import sep as _sep, altsep as _altsep
+
+
+class Cache(object):
+    def __init__(self, config):
+        self.config = config
+        self._cachedir = config.rootdir.join(".cache")
+        self.trace = config.trace.root.get("cache")
+        if config.getvalue("cacheclear"):
+            self.trace("clearing cachedir")
+            if self._cachedir.check():
+                self._cachedir.remove()
+            self._cachedir.mkdir()
+
+    def makedir(self, name):
+        """ return a directory path object with the given name.  If the
+        directory does not yet exist, it will be created.  You can use it
+        to manage files, e.g. to store/retrieve database
+        dumps across test sessions.
+
+        :param name: must be a string not containing a ``/`` separator.
+             Make sure the name contains your plugin or application
+             identifiers to prevent clashes with other cache users.
+        """
+        if _sep in name or _altsep is not None and _altsep in name:
+            raise ValueError("name is not allowed to contain path separators")
+        return self._cachedir.ensure_dir("d", name)
+
+    def _getvaluepath(self, key):
+        return self._cachedir.join('v', *key.split('/'))
+
+    def get(self, key, default):
+        """ return cached value for the given key.  If no value
+        was yet cached or the value cannot be read, the specified
+        default is returned.
+
+        :param key: must be a ``/`` separated value. Usually the first
+             name is the name of your plugin or your application.
+        :param default: must be provided in case of a cache-miss or
+             invalid cache values.
+
+        """
+        path = self._getvaluepath(key)
+        if path.check():
+            try:
+                with path.open("r") as f:
+                    return json.load(f)
+            except ValueError:
+                self.trace("cache-invalid at %s" % (path,))
+        return default
+
+    def set(self, key, value):
+        """ save value for the given key.
+
+        :param key: must be a ``/`` separated value. Usually the first
+             name is the name of your plugin or your application.
+        :param value: must be of any combination of basic
+               python types, including nested types
+               like e. g. lists of dictionaries.
+        """
+        path = self._getvaluepath(key)
+        try:
+            path.dirpath().ensure_dir()
+        except (py.error.EEXIST, py.error.EACCES):
+            self.config.warn(
+                code='I9', message='could not create cache path %s' % (path,)
+            )
+            return
+        try:
+            f = path.open('w')
+        except py.error.ENOTDIR:
+            self.config.warn(
+                code='I9', message='cache could not write path %s' % (path,))
+        else:
+            with f:
+                self.trace("cache-write %s: %r" % (key, value,))
+                json.dump(value, f, indent=2, sort_keys=True)
+
+
+class LFPlugin:
+    """ Plugin which implements the --lf (run last-failing) option """
+    def __init__(self, config):
+        self.config = config
+        active_keys = 'lf', 'failedfirst'
+        self.active = any(config.getvalue(key) for key in active_keys)
+        if self.active:
+            self.lastfailed = config.cache.get("cache/lastfailed", {})
+        else:
+            self.lastfailed = {}
+
+    def pytest_report_header(self):
+        if self.active:
+            if not self.lastfailed:
+                mode = "run all (no recorded failures)"
+            else:
+                mode = "rerun last %d failures%s" % (
+                    len(self.lastfailed),
+                    " first" if self.config.getvalue("failedfirst") else "")
+            return "run-last-failure: %s" % mode
+
+    def pytest_runtest_logreport(self, report):
+        if report.failed and "xfail" not in report.keywords:
+            self.lastfailed[report.nodeid] = True
+        elif not report.failed:
+            if report.when == "call":
+                self.lastfailed.pop(report.nodeid, None)
+
+    def pytest_collectreport(self, report):
+        passed = report.outcome in ('passed', 'skipped')
+        if passed:
+            if report.nodeid in self.lastfailed:
+                self.lastfailed.pop(report.nodeid)
+                self.lastfailed.update(
+                    (item.nodeid, True)
+                    for item in report.result)
+        else:
+            self.lastfailed[report.nodeid] = True
+
+    def pytest_collection_modifyitems(self, session, config, items):
+        if self.active and self.lastfailed:
+            previously_failed = []
+            previously_passed = []
+            for item in items:
+                if item.nodeid in self.lastfailed:
+                    previously_failed.append(item)
+                else:
+                    previously_passed.append(item)
+            if not previously_failed and previously_passed:
+                # running a subset of all tests with recorded failures outside
+                # of the set of tests currently executing
+                pass
+            elif self.config.getvalue("failedfirst"):
+                items[:] = previously_failed + previously_passed
+            else:
+                items[:] = previously_failed
+                config.hook.pytest_deselected(items=previously_passed)
+
+    def pytest_sessionfinish(self, session):
+        config = self.config
+        if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
+            return
+        prev_failed = config.cache.get("cache/lastfailed", None) is not None
+        if (session.testscollected and prev_failed) or self.lastfailed:
+            config.cache.set("cache/lastfailed", self.lastfailed)
+
+
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group.addoption(
+        '--lf', '--last-failed', action='store_true', dest="lf",
+        help="rerun only the tests that failed "
+             "at the last run (or all if none failed)")
+    group.addoption(
+        '--ff', '--failed-first', action='store_true', dest="failedfirst",
+        help="run all tests but run the last failures first.  "
+             "This may re-order tests and thus lead to "
+             "repeated fixture setup/teardown")
+    group.addoption(
+        '--cache-show', action='store_true', dest="cacheshow",
+        help="show cache contents, don't perform collection or tests")
+    group.addoption(
+        '--cache-clear', action='store_true', dest="cacheclear",
+        help="remove all cache contents at start of test run.")
+
+
+def pytest_cmdline_main(config):
+    if config.option.cacheshow:
+        from _pytest.main import wrap_session
+        return wrap_session(config, cacheshow)
+
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_configure(config):
+    config.cache = Cache(config)
+    config.pluginmanager.register(LFPlugin(config), "lfplugin")
+
+
+@pytest.fixture
+def cache(request):
+    """
+    Return a cache object that can persist state between testing sessions.
+
+    cache.get(key, default)
+    cache.set(key, value)
+
+    Keys must be a ``/`` separated value, where the first part is usually the
+    name of your plugin or application to avoid clashes with other cache users.
+
+    Values can be any object handled by the json stdlib module.
+    """
+    return request.config.cache
+
+
+def pytest_report_header(config):
+    if config.option.verbose:
+        relpath = py.path.local().bestrelpath(config.cache._cachedir)
+        return "cachedir: %s" % relpath
+
+
+def cacheshow(config, session):
+    from pprint import pprint
+    tw = py.io.TerminalWriter()
+    tw.line("cachedir: " + str(config.cache._cachedir))
+    if not config.cache._cachedir.check():
+        tw.line("cache is empty")
+        return 0
+    dummy = object()
+    basedir = config.cache._cachedir
+    vdir = basedir.join("v")
+    tw.sep("-", "cache values")
+    for valpath in vdir.visit(lambda x: x.isfile()):
+        key = valpath.relto(vdir).replace(valpath.sep, "/")
+        val = config.cache.get(key, dummy)
+        if val is dummy:
+            tw.line("%s contains unreadable content, "
+                  "will be ignored" % key)
+        else:
+            tw.line("%s contains:" % key)
+            stream = py.io.TextIO()
+            pprint(val, stream=stream)
+            for line in stream.getvalue().splitlines():
+                tw.line("  " + line)
+
+    ddir = basedir.join("d")
+    if ddir.isdir() and ddir.listdir():
+        tw.sep("-", "cache directories")
+        for p in basedir.join("d").visit():
+            #if p.check(dir=1):
+            #    print("%s/" % p.relto(basedir))
+            if p.isfile():
+                key = p.relto(basedir)
+                tw.line("%s is a file of length %d" % (
+                        key, p.size()))
+    return 0
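
As the walker above implies, values live under the ``v/`` subdirectory of the cache dir with the ``/`` separated key mirrored as a path, while plugin-created directories live under ``d/``. A small sketch of the key/path mapping, assuming the default ``.cache`` directory name:

```
import py

basedir = py.path.local(".cache")
vdir = basedir.join("v")
valpath = vdir.join("cache", "lastfailed")           # file written by cache.set()
key = valpath.relto(vdir).replace(valpath.sep, "/")  # -> "cache/lastfailed"
print(key)
```
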
diff --git a/_pytest/vendored_packages/README.md b/_pytest/vendored_packages/README.md
new file mode 100644
--- /dev/null
+++ b/_pytest/vendored_packages/README.md
@@ -0,0 +1,13 @@
+This directory vendors the `pluggy` module.
+
+For a more detailed discussion of the reasons for vendoring this
+package, please see [this issue](https://github.com/pytest-dev/pytest/issues/944).
+
+To update the current version, execute:
+
+```
+$ pip install -U pluggy==<version> --no-compile --target=_pytest/vendored_packages
+```
+
+Then commit the modified files. The `pluggy-<version>.dist-info` directory
+created by `pip` should be ignored.
diff --git a/_pytest/vendored_packages/__init__.py b/_pytest/vendored_packages/__init__.py
new file mode 100644
diff --git a/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst
new file mode 100644
--- /dev/null
+++ b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst
@@ -0,0 +1,10 @@
+Plugin registration and hook calling for Python
+===============================================
+
+This is the plugin manager as used by pytest but stripped
+of pytest specific details.
+
+During the 0.x series this plugin does not have much documentation
+except extensive docstrings in the pluggy.py module.
+
+
diff --git a/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA
new file mode 100644
--- /dev/null
+++ b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA
@@ -0,0 +1,39 @@
+Metadata-Version: 2.0
+Name: pluggy
+Version: 0.3.1
+Summary: plugin and hook calling mechanisms for python
+Home-page: UNKNOWN
+Author: Holger Krekel
+Author-email: holger at merlinux.eu
+License: MIT license
+Platform: unix
+Platform: linux
+Platform: osx
+Platform: win32
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+
+Plugin registration and hook calling for Python
+===============================================
+
+This is the plugin manager as used by pytest but stripped
+of pytest specific details.
+
+During the 0.x series this plugin does not have much documentation
+except extensive docstrings in the pluggy.py module.
+
+
diff --git a/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD
new file mode 100644
--- /dev/null
+++ b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD
@@ -0,0 +1,8 @@
+pluggy.py,sha256=v_RfWzyW6DPU1cJu_EFoL_OHq3t13qloVdR6UaMCXQA,29862
+pluggy-0.3.1.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7
+pluggy-0.3.1.dist-info/pbr.json,sha256=xX3s6__wOcAyF-AZJX1sdZyW6PUXT-FkfBlM69EEUCg,47
+pluggy-0.3.1.dist-info/RECORD,,
+pluggy-0.3.1.dist-info/metadata.json,sha256=nLKltOT78dMV-00uXD6Aeemp4xNsz2q59j6ORSDeLjw,1027
+pluggy-0.3.1.dist-info/METADATA,sha256=1b85Ho2u4iK30M099k7axMzcDDhLcIMb-A82JUJZnSo,1334
+pluggy-0.3.1.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
+pluggy-0.3.1.dist-info/DESCRIPTION.rst,sha256=P5Akh1EdIBR6CeqtV2P8ZwpGSpZiTKPw0NyS7jEiD-g,306
diff --git a/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL
new file mode 100644
--- /dev/null
+++ b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.24.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json
new file mode 100644
--- /dev/null
+++ b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json
@@ -0,0 +1,1 @@
+{"license": "MIT license", "name": "pluggy", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "plugin and hook calling mechanisms for python", "platform": "unix", "version": "0.3.1", "extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "holger at merlinux.eu", "name": "Holger Krekel"}]}}, "classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"]}
\ No newline at end of file
diff --git a/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json
new file mode 100644
--- /dev/null
+++ b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json
@@ -0,0 +1,1 @@
+{"is_release": false, "git_version": "7d4c9cd"}
\ No newline at end of file
diff --git a/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt
new file mode 100644
--- /dev/null
+++ b/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt
@@ -0,0 +1,1 @@
+pluggy
diff --git a/_pytest/vendored_packages/pluggy.py b/_pytest/vendored_packages/pluggy.py
new file mode 100644
--- /dev/null
+++ b/_pytest/vendored_packages/pluggy.py
@@ -0,0 +1,777 @@
+"""
+PluginManager, basic initialization and tracing.
+
+pluggy is the crystallized core of plugin management as used
+by some 150 plugins for pytest.
+
+Pluggy uses semantic versioning. Breaking changes are only foreseen for
+major releases (the incremented X in "X.Y.Z").  If you want to use pluggy in
+your project you should thus use a dependency restriction like
+"pluggy>=0.1.0,<1.0" to avoid surprises.
+
+pluggy is concerned with hook specification, hook implementations and hook
+calling.  For any given hook specification a hook call invokes up to N implementations.
+A hook implementation can influence its position and type of execution:
+if attributed "tryfirst" or "trylast" it will be tried to execute
+first or last.  However, if attributed "hookwrapper" an implementation
+can wrap all calls to non-hookwrapper implementations.  A hookwrapper
+can thus execute some code ahead and after the execution of other hooks.
+
+Hook specification is done by way of a regular python function where
+both the function name and the names of all its arguments are significant.
+Each hook implementation function is verified against the original specification
+function, including the names of all its arguments.  To allow for hook specifications
+to evolve over the lifetime of a project, hook implementations can
+accept fewer arguments.  One can thus add new arguments and semantics to
+a hook specification, typically without breaking
+existing hook implementations.
+
+The chosen approach is meant to let a hook designer think carefully about
+which objects are needed by an extension writer.  By contrast, subclass-based
+extension mechanisms often expose a lot more state and behaviour than needed,
+thus restricting future developments.
+
+Pluggy currently consists of functionality for:
+
+- a way to register new hook specifications.  Without a hook
+  specification no hook calling can be performed.
+
+- a registry of plugins which contain hook implementation functions.  It
+  is possible to register plugins for which a hook specification is not yet
+  known and validate all hooks when the system is in a more referentially
+  consistent state.  Setting the "optionalhook" attribute on a hook
+  implementation avoids a PluginValidationError if a specification
+  is missing.  This allows optional integration between plugins.
+
+- a "hook" relay object from which you can launch 1:N calls to
+  registered hook implementation functions
+
+- a mechanism for ordering hook implementation functions
+
+- mechanisms for two different types of 1:N calls: "firstresult", where
+  the call stops as soon as an implementation returns a non-None result,
+  and the default behaviour, where all hook implementations
+  are called and their non-None results collected.
+
+- mechanisms for "historic" extension points such that all newly
+  registered functions will receive all hook calls that happened
+  before their registration.
+
+- a mechanism for discovering plugin objects via
+  setuptools entry points.
+
+- a simple tracing mechanism, including tracing of plugin calls and
+  their arguments.
+
+"""
+import sys
+import inspect
+
+__version__ = '0.3.1'
+__all__ = ["PluginManager", "PluginValidationError",
+           "HookspecMarker", "HookimplMarker"]
+
+_py3 = sys.version_info > (3, 0)
+
+
+class HookspecMarker:
+    """ Decorator helper class for marking functions as hook specifications.
+
+    You can instantiate it with a project_name to get a decorator.
+    Calling PluginManager.add_hookspecs later will discover all marked functions
+    if the PluginManager uses the same project_name.
+    """
+
+    def __init__(self, project_name):
+        self.project_name = project_name
+
+    def __call__(self, function=None, firstresult=False, historic=False):
+        """ if passed a function, directly sets attributes on the function
+        which will make it discoverable to add_hookspecs().  If passed no
+        function, returns a decorator which can be applied to a function
+        later using the attributes supplied.
+
+        If firstresult is True the 1:N hook call (N being the number of registered
+        hook implementation functions) will stop at I<=N when the I'th function
+        returns a non-None result.
+
+        If historic is True calls to a hook will be memorized and replayed
+        on later registered plugins.
+
+        """
+        def setattr_hookspec_opts(func):
+            if historic and firstresult:
+                raise ValueError("cannot have a historic firstresult hook")
+            setattr(func, self.project_name + "_spec",
+                   dict(firstresult=firstresult, historic=historic))
+            return func
+
+        if function is not None:
+            return setattr_hookspec_opts(function)
+        else:
+            return setattr_hookspec_opts
+
+
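
A short sketch of the two spec options explained in the ``__call__`` docstring above (project and hook names are illustrative):

```
from pluggy import HookspecMarker

hookspec = HookspecMarker("myproject")

@hookspec(firstresult=True)
def choose_backend(name):
    """The 1:N call stops at the first implementation returning non-None."""

@hookspec(historic=True)
def announce_config(config):
    """Calls are memorized and replayed on plugins registered later."""
```
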
+class HookimplMarker:
+    """ Decorator helper class for marking functions as hook implementations.
+
+    You can instantiate with a project_name to get a decorator.
+    Calling PluginManager.register later will discover all marked functions
+    if the PluginManager uses the same project_name.
+    """
+    def __init__(self, project_name):
+        self.project_name = project_name
+
+    def __call__(self, function=None, hookwrapper=False, optionalhook=False,
+                 tryfirst=False, trylast=False):
+
+        """ if passed a function, directly sets attributes on the function
+        which will make it discoverable to register().  If passed no function,
+        returns a decorator which can be applied to a function later using
+        the attributes supplied.
+
+        If optionalhook is True a missing matching hook specification will not result
+        in an error (by default it is an error if no matching spec is found).
+
+        If tryfirst is True this hook implementation will run as early as possible
+        in the chain of N hook implementations for a specification.
+
+        If trylast is True this hook implementation will run as late as possible
+        in the chain of N hook implementations.
+
+        If hookwrapper is True the hook implementation needs to execute exactly
+        one "yield".  The code before the yield runs before any non-hookwrapper
+        function is run.  The code after the yield runs after all non-hookwrapper
+        functions have run.  The yield receives an ``_CallOutcome`` object representing
+        the exception or result outcome of the inner calls (including other hookwrapper
+        calls).
+
+        """
+        def setattr_hookimpl_opts(func):
+            setattr(func, self.project_name + "_impl",
+                   dict(hookwrapper=hookwrapper, optionalhook=optionalhook,
+                        tryfirst=tryfirst, trylast=trylast))
+            return func
+
+        if function is None:
+            return setattr_hookimpl_opts
+        else:
+            return setattr_hookimpl_opts(function)
+
+
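
A minimal hookwrapper sketch matching the docstring above: exactly one ``yield``, which receives the outcome of the inner, non-wrapper implementations (names are illustrative):

```
from pluggy import HookimplMarker

hookimpl = HookimplMarker("myproject")

@hookimpl(hookwrapper=True)
def myhook(arg):
    # runs before any non-hookwrapper implementation
    outcome = yield
    # runs after all non-hookwrapper implementations; get_result()
    # returns their collected results or reraises their exception
    outcome.get_result()
```
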
+def normalize_hookimpl_opts(opts):
+    opts.setdefault("tryfirst", False)
+    opts.setdefault("trylast", False)
+    opts.setdefault("hookwrapper", False)
+    opts.setdefault("optionalhook", False)
+
+
+class _TagTracer:
+    def __init__(self):
+        self._tag2proc = {}
+        self.writer = None
+        self.indent = 0
+
+    def get(self, name):
+        return _TagTracerSub(self, (name,))
+
+    def format_message(self, tags, args):
+        if isinstance(args[-1], dict):
+            extra = args[-1]
+            args = args[:-1]
+        else:
+            extra = {}
+
+        content = " ".join(map(str, args))
+        indent = "  " * self.indent
+
+        lines = [
+            "%s%s [%s]\n" % (indent, content, ":".join(tags))
+        ]
+
+        for name, value in extra.items():
+            lines.append("%s    %s: %s\n" % (indent, name, value))
+        return lines
+
+    def processmessage(self, tags, args):
+        if self.writer is not None and args:
+            lines = self.format_message(tags, args)
+            self.writer(''.join(lines))
+        try:
+            self._tag2proc[tags](tags, args)
+        except KeyError:
+            pass
+
+    def setwriter(self, writer):
+        self.writer = writer
+
+    def setprocessor(self, tags, processor):
+        if isinstance(tags, str):
+            tags = tuple(tags.split(":"))
+        else:
+            assert isinstance(tags, tuple)
+        self._tag2proc[tags] = processor
+
+
+class _TagTracerSub:
+    def __init__(self, root, tags):
+        self.root = root
+        self.tags = tags
+
+    def __call__(self, *args):
+        self.root.processmessage(self.tags, args)
+
+    def setmyprocessor(self, processor):
+        self.root.setprocessor(self.tags, processor)
+
+    def get(self, name):
+        return self.__class__(self.root, self.tags + (name,))
+
+
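
A small sketch of how these internal tracing helpers fit together (``_TagTracer`` is private to the module; the tag names are illustrative):

```
import sys
from pluggy import _TagTracer   # internal helper defined above

root = _TagTracer()
root.setwriter(sys.stderr.write)              # route formatted messages to stderr
trace = root.get("pluginmanage").get("hook")  # sub-tracer tagged pluginmanage:hook
trace("calling hook", {"plugin": "example"})
# writes "calling hook [pluginmanage:hook]" plus an indented "plugin: example" line
```
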
+def _raise_wrapfail(wrap_controller, msg):
+    co = wrap_controller.gi_code
+    raise RuntimeError("wrap_controller at %r %s:%d %s" %
+                   (co.co_name, co.co_filename, co.co_firstlineno, msg))
+
+
+def _wrapped_call(wrap_controller, func):
+    """ Wrap calling to a function with a generator which needs to yield
+    exactly once.  The yield point will trigger calling the wrapped function
+    and return its _CallOutcome to the yield point.  The generator then needs
+    to finish (raise StopIteration) in order for the wrapped call to complete.
+    """
+    try:
+        next(wrap_controller)   # first yield
+    except StopIteration:
+        _raise_wrapfail(wrap_controller, "did not yield")
+    call_outcome = _CallOutcome(func)
+    try:
+        wrap_controller.send(call_outcome)
+        _raise_wrapfail(wrap_controller, "has second yield")
+    except StopIteration:
+        pass
+    return call_outcome.get_result()
+
+
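
A hedged sketch of the single-yield protocol ``_wrapped_call`` drives; the controller and the wrapped function below are illustrative:

```
from pluggy import _wrapped_call   # internal helper defined above

def controller():
    print("before the inner call")
    outcome = yield                # receives the _CallOutcome of the inner call
    print("inner result:", outcome.get_result())

result = _wrapped_call(controller(), lambda: 42)
assert result == 42
```
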
+class _CallOutcome:
+    """ Outcome of a function call, either an exception or a proper result.
+    Calling the ``get_result`` method will return the result or reraise
+    the exception raised when the function was called. """
+    excinfo = None
+
+    def __init__(self, func):
+        try:
+            self.result = func()
+        except BaseException:
+            self.excinfo = sys.exc_info()
+

