[pypy-commit] pypy ffi-backend: Work in progress: remove optimizeopt/fficall, and (plan to) replace
arigo
noreply at buildbot.pypy.org
Thu Aug 2 22:42:05 CEST 2012
Author: Armin Rigo <arigo at tunes.org>
Branch: ffi-backend
Changeset: r56547:72f1bc3b949d
Date: 2012-08-02 22:41 +0200
http://bitbucket.org/pypy/pypy/changeset/72f1bc3b949d/
Log: Work in progress: remove optimizeopt/fficall, and (plan to) replace
it with just a test in pyjitpl.
diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py
--- a/pypy/jit/metainterp/optimizeopt/__init__.py
+++ b/pypy/jit/metainterp/optimizeopt/__init__.py
@@ -5,7 +5,6 @@
from pypy.jit.metainterp.optimizeopt.heap import OptHeap
from pypy.jit.metainterp.optimizeopt.vstring import OptString
from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll
-from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall
from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify
from pypy.jit.metainterp.optimizeopt.pure import OptPure
from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce
@@ -21,7 +20,6 @@
('earlyforce', OptEarlyForce),
('pure', OptPure),
('heap', OptHeap),
- ('ffi', None),
('unroll', None)]
# no direct instantiation of unroll
unroll_all_opts = unrolling_iterable(ALL_OPTS)
@@ -42,11 +40,6 @@
if opt is not None:
o = opt()
optimizations.append(o)
- elif name == 'ffi' and config.translation.jit_ffi:
- # we cannot put the class directly in the unrolling_iterable,
- # because we do not want it to be seen at all (to avoid to
- # introduce a dependency on libffi in case we do not need it)
- optimizations.append(OptFfiCall())
if ('rewrite' not in enable_opts or 'virtualize' not in enable_opts
or 'heap' not in enable_opts or 'unroll' not in enable_opts
diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py
deleted file mode 100644
--- a/pypy/jit/metainterp/optimizeopt/fficall.py
+++ /dev/null
@@ -1,210 +0,0 @@
-from pypy.jit.codewriter.effectinfo import EffectInfo
-from pypy.jit.metainterp.optimizeopt.optimizer import Optimization
-from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
-from pypy.jit.metainterp.resoperation import rop, ResOperation
-from pypy.rlib import clibffi, libffi
-from pypy.rlib.debug import debug_print
-from pypy.rlib.libffi import Func
-from pypy.rlib.objectmodel import we_are_translated
-from pypy.rpython.annlowlevel import cast_base_ptr_to_instance
-from pypy.rpython.lltypesystem import lltype, llmemory, rffi
-from pypy.rlib.objectmodel import we_are_translated
-from pypy.rlib.rarithmetic import intmask
-
-
-class FuncInfo(object):
-
- argtypes = None
- restype = None
- descr = None
- prepare_op = None
-
- def __init__(self, funcval, cpu, prepare_op):
- self.funcval = funcval
- self.opargs = []
- argtypes, restype, flags = self._get_signature(funcval)
- self.descr = cpu.calldescrof_dynamic(argtypes, restype,
- EffectInfo.MOST_GENERAL,
- ffi_flags=flags)
- # ^^^ may be None if unsupported
- self.prepare_op = prepare_op
- self.delayed_ops = []
-
- def _get_signature(self, funcval):
- """
- given the funcval, return a tuple (argtypes, restype, flags), where
-        the actual types are libffi.types.*
-
- The implementation is tricky because we have three possible cases:
-
- - translated: the easiest case, we can just cast back the pointer to
- the original Func instance and read .argtypes, .restype and .flags
-
- - completely untranslated: this is what we get from test_optimizeopt
- tests. funcval contains a FakeLLObject whose _fake_class is Func,
- and we can just get .argtypes, .restype and .flags
-
- - partially translated: this happens when running metainterp tests:
- funcval contains the low-level equivalent of a Func, and thus we
- have to fish inst_argtypes and inst_restype by hand. Note that
- inst_argtypes is actually a low-level array, but we can use it
- directly since the only thing we do with it is to read its items
- """
-
- llfunc = funcval.box.getref_base()
- if we_are_translated():
- func = cast_base_ptr_to_instance(Func, llfunc)
- return func.argtypes, func.restype, func.flags
- elif getattr(llfunc, '_fake_class', None) is Func:
- # untranslated
- return llfunc.argtypes, llfunc.restype, llfunc.flags
- else:
- # partially translated
- # llfunc contains an opaque pointer to something like the following:
- # <GcStruct pypy.rlib.libffi.Func { super, inst_argtypes, inst_funcptr,
- # inst_funcsym, inst_restype }>
- #
- # Unfortunately, we cannot use the proper lltype.cast_opaque_ptr,
- # because we don't have the exact TYPE to cast to. Instead, we
- # just fish it manually :-(
- f = llfunc._obj.container
- return f.inst_argtypes, f.inst_restype, f.inst_flags
-
-
-class OptFfiCall(Optimization):
-
- def setup(self):
- self.funcinfo = None
- if self.optimizer.loop is not None:
- self.logops = self.optimizer.loop.logops
- else:
- self.logops = None
-
- def new(self):
- return OptFfiCall()
-
- def begin_optimization(self, funcval, op):
- self.rollback_maybe('begin_optimization', op)
- self.funcinfo = FuncInfo(funcval, self.optimizer.cpu, op)
-
- def commit_optimization(self):
- self.funcinfo = None
-
- def rollback_maybe(self, msg, op):
- if self.funcinfo is None:
- return # nothing to rollback
- #
- # we immediately set funcinfo to None to prevent recursion when
- # calling emit_op
- if self.logops is not None:
- debug_print('rollback: ' + msg + ': ', self.logops.repr_of_resop(op))
- funcinfo = self.funcinfo
- self.funcinfo = None
- self.emit_operation(funcinfo.prepare_op)
- for op in funcinfo.opargs:
- self.emit_operation(op)
- for delayed_op in funcinfo.delayed_ops:
- self.emit_operation(delayed_op)
-
- def emit_operation(self, op):
- # we cannot emit any operation during the optimization
- self.rollback_maybe('invalid op', op)
- Optimization.emit_operation(self, op)
-
- def optimize_CALL(self, op):
- oopspec = self._get_oopspec(op)
- ops = [op]
- if oopspec == EffectInfo.OS_LIBFFI_PREPARE:
- ops = self.do_prepare_call(op)
- elif oopspec == EffectInfo.OS_LIBFFI_PUSH_ARG:
- ops = self.do_push_arg(op)
- elif oopspec == EffectInfo.OS_LIBFFI_CALL:
- ops = self.do_call(op)
- #
- for op in ops:
- self.emit_operation(op)
-
- optimize_CALL_MAY_FORCE = optimize_CALL
-
- def optimize_FORCE_TOKEN(self, op):
- # The handling of force_token needs a bit of explanation.
- # The original trace which is getting optimized looks like this:
- # i1 = force_token()
- # setfield_gc(p0, i1, ...)
- # call_may_force(...)
- #
- # In theory, fficall should take care of both force_token and
- # setfield_gc. However, the lazy setfield optimization in heap.py
- # delays the setfield_gc, with the effect that fficall.py sees them in
- # this order:
- # i1 = force_token()
- # call_may_force(...)
- # setfield_gc(p0, i1, ...)
- #
-        # This means that we see the setfield_gc only after the call_may_force,
-        # when the optimization has already been done, and thus we need to take
- # special care just of force_token.
- #
- # Finally, the method force_lazy_setfield in heap.py reorders the
- # call_may_force and the setfield_gc, so the final result we get is
- # again force_token/setfield_gc/call_may_force.
- #
- # However, note that nowadays we also allow to have any setfield_gc
- # between libffi_prepare and libffi_call, so while the comment above
-        # is a bit superfluous, it has been left there for future reference.
- if self.funcinfo is None:
- self.emit_operation(op)
- else:
- self.funcinfo.delayed_ops.append(op)
-
- optimize_SETFIELD_GC = optimize_FORCE_TOKEN
-
- def do_prepare_call(self, op):
- self.rollback_maybe('prepare call', op)
- funcval = self._get_funcval(op)
- if not funcval.is_constant():
- return [op] # cannot optimize
- self.begin_optimization(funcval, op)
- return []
-
- def do_push_arg(self, op):
- funcval = self._get_funcval(op)
- if not self.funcinfo or self.funcinfo.funcval is not funcval:
- return [op] # cannot optimize
- self.funcinfo.opargs.append(op)
- return []
-
- def do_call(self, op):
- funcval = self._get_funcval(op)
- funcinfo = self.funcinfo
- if (not funcinfo or funcinfo.funcval is not funcval or
- funcinfo.descr is None):
- return [op] # cannot optimize
- funcsymval = self.getvalue(op.getarg(2))
- arglist = [funcsymval.get_key_box()]
- for push_op in funcinfo.opargs:
- argval = self.getvalue(push_op.getarg(2))
- arglist.append(argval.get_key_box())
- newop = ResOperation(rop.CALL_RELEASE_GIL, arglist, op.result,
- descr=funcinfo.descr)
- self.commit_optimization()
- ops = []
- for delayed_op in funcinfo.delayed_ops:
- ops.append(delayed_op)
- ops.append(newop)
- return ops
-
- def propagate_forward(self, op):
- if self.logops is not None:
- debug_print(self.logops.repr_of_resop(op))
- dispatch_opt(self, op)
-
- def _get_oopspec(self, op):
- effectinfo = op.getdescr().get_extra_info()
- return effectinfo.oopspecindex
-
- def _get_funcval(self, op):
- return self.getvalue(op.getarg(1))
-
-dispatch_opt = make_dispatcher_method(OptFfiCall, 'optimize_',
- default=OptFfiCall.emit_operation)
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
deleted file mode 100644
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
+++ /dev/null
@@ -1,315 +0,0 @@
-from pypy.rpython.lltypesystem import llmemory
-from pypy.rlib.libffi import Func, types
-from pypy.jit.metainterp.history import AbstractDescr
-from pypy.jit.codewriter.effectinfo import EffectInfo
-from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import BaseTestBasic
-from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import LLtypeMixin
-
-class MyCallDescr(AbstractDescr):
- """
- Fake calldescr to be used inside the tests.
-
- The particularity is that it provides an __eq__ method, so that it
-    compares by value by comparing the arg_types and typeinfo fields, so you
- can check that the signature of a call is really what you want.
- """
-
- def __init__(self, arg_types, typeinfo, flags):
- self.arg_types = arg_types
- self.typeinfo = typeinfo # return type
- self.flags = flags
-
- def __eq__(self, other):
- return (self.arg_types == other.arg_types and
- self.typeinfo == other.typeinfo and
- self.flags == other.get_ffi_flags())
-
-class FakeLLObject(object):
-
- def __init__(self, **kwds):
- self.__dict__.update(kwds)
- self._TYPE = llmemory.GCREF
-
- def _identityhash(self):
- return id(self)
-
-
-class TestFfiCall(BaseTestBasic, LLtypeMixin):
-
- enable_opts = "intbounds:rewrite:virtualize:string:pure:earlyforce:heap:ffi"
-
- class namespace:
- cpu = LLtypeMixin.cpu
- FUNC = LLtypeMixin.FUNC
- vable_token_descr = LLtypeMixin.valuedescr
- valuedescr = LLtypeMixin.valuedescr
-
- int_float__int_42 = MyCallDescr('if', 'i', 42)
- int_float__int_43 = MyCallDescr('if', 'i', 43)
- funcptr = FakeLLObject()
- func = FakeLLObject(_fake_class=Func,
- argtypes=[types.sint, types.double],
- restype=types.sint,
- flags=42)
- func2 = FakeLLObject(_fake_class=Func,
- argtypes=[types.sint, types.double],
- restype=types.sint,
- flags=43)
- #
- ffi_slong = types.slong
- dyn_123_field = cpu.fielddescrof_dynamic(offset=123,
- fieldsize=types.slong.c_size,
- is_pointer=False,
- is_float=False,
- is_signed=True)
- #
- def calldescr(cpu, FUNC, oopspecindex, extraeffect=None):
- if extraeffect == EffectInfo.EF_RANDOM_EFFECTS:
- f = None # means "can force all" really
- else:
- f = []
- einfo = EffectInfo(f, f, f, f, oopspecindex=oopspecindex,
- extraeffect=extraeffect)
- return cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, einfo)
- #
- libffi_prepare = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_PREPARE)
- libffi_push_arg = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_PUSH_ARG)
- libffi_call = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_CALL,
- EffectInfo.EF_RANDOM_EFFECTS)
- libffi_struct_getfield = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_STRUCT_GETFIELD)
- libffi_struct_setfield = calldescr(cpu, FUNC, EffectInfo.OS_LIBFFI_STRUCT_SETFIELD)
-
- namespace = namespace.__dict__
-
- # ----------------------------------------------------------------------
- # this group of tests is the most important, as they represent the "real"
- # cases you actually get when using rlib.libffi
-
- def test_ffi_call_opt(self):
- ops = """
- [i0, f1]
- call(0, ConstPtr(func), descr=libffi_prepare)
- call(0, ConstPtr(func), i0, descr=libffi_push_arg)
- call(0, ConstPtr(func), f1, descr=libffi_push_arg)
- i3 = call_may_force(0, ConstPtr(func), 12345, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- jump(i3, f1)
- """
- expected = """
- [i0, f1]
- i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42)
- guard_not_forced() []
- guard_no_exception() []
- jump(i3, f1)
- """
- loop = self.optimize_loop(ops, expected)
-
- def test_ffi_call_nonconst(self):
- ops = """
- [i0, f1, p2]
- call(0, p2, descr=libffi_prepare)
- call(0, p2, i0, descr=libffi_push_arg)
- call(0, p2, f1, descr=libffi_push_arg)
- i3 = call_may_force(0, p2, 12345, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- jump(i3, f1, p2)
- """
- expected = ops
- loop = self.optimize_loop(ops, expected)
-
- def test_handle_virtualizables(self):
- # this test needs an explanation to understand what goes on: see the
- # comment in optimize_FORCE_TOKEN
- ops = """
- [i0, f1, p2]
- call(0, ConstPtr(func), descr=libffi_prepare)
- call(0, ConstPtr(func), i0, descr=libffi_push_arg)
- call(0, ConstPtr(func), f1, descr=libffi_push_arg)
- i4 = force_token()
- setfield_gc(p2, i4, descr=vable_token_descr)
- i3 = call_may_force(0, ConstPtr(func), 12345, descr=libffi_call)
- guard_not_forced() [p2]
- guard_no_exception() [p2]
- jump(i3, f1, p2)
- """
- expected = """
- [i0, f1, p2]
- i4 = force_token()
- setfield_gc(p2, i4, descr=vable_token_descr)
- i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42)
- guard_not_forced() [p2]
- guard_no_exception() [p2]
- jump(i3, f1, p2)
- """
- loop = self.optimize_loop(ops, expected)
-
- # ----------------------------------------------------------------------
-    # in practice, the situations described in these tests should never happen,
- # but we still want to ensure correctness
-
- def test_rollback_if_op_in_between(self):
- ops = """
- [i0, f1]
- call(0, ConstPtr(func), descr=libffi_prepare)
- call(0, ConstPtr(func), i0, descr=libffi_push_arg)
- i1 = int_add(i0, 1)
- call(0, ConstPtr(func), f1, descr=libffi_push_arg)
- i3 = call_may_force(0, ConstPtr(func), 12345, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- jump(i3, f1)
- """
- expected = ops
- loop = self.optimize_loop(ops, expected)
-
- def test_rollback_multiple_calls(self):
- ops = """
- [i0, i2, f1]
- call(0, ConstPtr(func), descr=libffi_prepare)
- call(0, ConstPtr(func), i0, descr=libffi_push_arg)
- #
- # this is the culprit!
- call(0, ConstPtr(func2), descr=libffi_prepare)
- #
- call(0, ConstPtr(func), f1, descr=libffi_push_arg)
- i3 = call_may_force(0, ConstPtr(func), 12345, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- call(0, ConstPtr(func2), i0, descr=libffi_push_arg)
- call(0, ConstPtr(func2), f1, descr=libffi_push_arg)
- i4 = call_may_force(0, ConstPtr(func2), 67890, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- jump(i3, i4, f1)
- """
- expected = ops
- loop = self.optimize_loop(ops, expected)
-
- def test_rollback_multiple_prepare(self):
- ops = """
- [i0, i2, f1]
- call(0, ConstPtr(func), descr=libffi_prepare)
- #
- # this is the culprit!
- call(0, ConstPtr(func2), descr=libffi_prepare)
- #
- call(0, ConstPtr(func), i0, descr=libffi_push_arg)
- call(0, ConstPtr(func), f1, descr=libffi_push_arg)
- i3 = call_may_force(0, ConstPtr(func), 12345, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- call(0, ConstPtr(func2), i0, descr=libffi_push_arg)
- call(0, ConstPtr(func2), f1, descr=libffi_push_arg)
- i4 = call_may_force(0, ConstPtr(func2), 67890, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- jump(i3, i4, f1)
- """
- expected = ops
- loop = self.optimize_loop(ops, expected)
-
- def test_optimize_nested_call(self):
- ops = """
- [i0, i2, f1]
- call(0, ConstPtr(func), descr=libffi_prepare)
- #
- # this "nested" call is nicely optimized
- call(0, ConstPtr(func2), descr=libffi_prepare)
- call(0, ConstPtr(func2), i0, descr=libffi_push_arg)
- call(0, ConstPtr(func2), f1, descr=libffi_push_arg)
- i4 = call_may_force(0, ConstPtr(func2), 67890, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- #
- call(0, ConstPtr(func), i0, descr=libffi_push_arg)
- call(0, ConstPtr(func), f1, descr=libffi_push_arg)
- i3 = call_may_force(0, ConstPtr(func), 12345, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- jump(i3, i4, f1)
- """
- expected = """
- [i0, i2, f1]
- call(0, ConstPtr(func), descr=libffi_prepare)
- #
- # this "nested" call is nicely optimized
- i4 = call_release_gil(67890, i0, f1, descr=int_float__int_43)
- guard_not_forced() []
- guard_no_exception() []
- #
- call(0, ConstPtr(func), i0, descr=libffi_push_arg)
- call(0, ConstPtr(func), f1, descr=libffi_push_arg)
- i3 = call_may_force(0, ConstPtr(func), 12345, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- jump(i3, i4, f1)
- """
- loop = self.optimize_loop(ops, expected)
-
- def test_rollback_force_token(self):
- ops = """
- [i0, f1, p2]
- call(0, ConstPtr(func), descr=libffi_prepare)
- call(0, ConstPtr(func), i0, descr=libffi_push_arg)
- call(0, ConstPtr(func), f1, descr=libffi_push_arg)
- i4 = force_token()
- i5 = int_add(i0, 1) # culprit!
- setfield_gc(p2, i4, descr=vable_token_descr)
- i3 = call_may_force(0, ConstPtr(func), 12345, descr=libffi_call)
- guard_not_forced() [p2]
- guard_no_exception() [p2]
- jump(i3, f1, p2)
- """
- expected = ops
- loop = self.optimize_loop(ops, expected)
-
- def test_allow_setfields_in_between(self):
- ops = """
- [i0, f1, p2]
- call(0, ConstPtr(func), descr=libffi_prepare)
- call(0, ConstPtr(func), i0, descr=libffi_push_arg)
- call(0, ConstPtr(func), f1, descr=libffi_push_arg)
- setfield_gc(p2, i0, descr=valuedescr)
- i3 = call_may_force(0, ConstPtr(func), 12345, descr=libffi_call)
- guard_not_forced() []
- guard_no_exception() []
- jump(i3, f1, p2)
- """
- expected = """
- [i0, f1, p2]
- setfield_gc(p2, i0, descr=valuedescr)
- i3 = call_release_gil(12345, i0, f1, descr=int_float__int_42)
- guard_not_forced() []
- guard_no_exception() []
- jump(i3, f1, p2)
- """
- loop = self.optimize_loop(ops, expected)
-
- def test_ffi_struct_fields(self):
- ops = """
- [i0]
- i1 = call(0, ConstClass(ffi_slong), i0, 123, descr=libffi_struct_getfield)
- i2 = int_add(i1, 1)
- call(0, ConstClass(ffi_slong), i0, 123, i2, descr=libffi_struct_setfield)
- jump(i1)
- """
- expected = """
- [i0]
- i1 = getfield_raw(i0, descr=dyn_123_field)
- i2 = int_add(i1, 1)
- setfield_raw(i0, i2, descr=dyn_123_field)
- jump(i1)
- """
- loop = self.optimize_loop(ops, expected)
-
- def test_ffi_struct_fields_nonconst(self):
- ops = """
- [i0, i1]
- i2 = call(0, ConstClass(ffi_slong), i0, i1, descr=libffi_struct_getfield)
- i3 = call(0, i1 , i0, 123, descr=libffi_struct_getfield)
- jump(i1)
- """
- expected = ops
- loop = self.optimize_loop(ops, expected)
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -1383,6 +1383,8 @@
if assembler_call:
vablebox = self.metainterp.direct_assembler_call(
assembler_call_jd)
+ elif effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL:
+ xxxx
if resbox is not None:
self.make_result_of_lastop(resbox)
self.metainterp.vable_after_residual_call()
diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py
--- a/pypy/jit/metainterp/test/test_fficall.py
+++ b/pypy/jit/metainterp/test/test_fficall.py
@@ -1,210 +1,52 @@
-from __future__ import with_statement
import py
+from pypy.rpython.lltypesystem import lltype, rffi
+from pypy.jit.metainterp.test.support import LLJitMixin
+from pypy.rlib import jit
+from pypy.rlib.jit_libffi import types, CIF_DESCRIPTION, FFI_TYPE_PP
-from pypy.jit.metainterp.test.support import LLJitMixin
-from pypy.rlib.jit import JitDriver, promote, dont_look_inside
-from pypy.rlib.libffi import (ArgChain, IS_32_BIT, array_getitem, array_setitem,
- types, struct_setfield_int, struct_getfield_int)
-from pypy.rlib.objectmodel import specialize
-from pypy.rlib.rarithmetic import r_singlefloat, r_longlong, r_ulonglong
-from pypy.rlib.test.test_libffi import TestLibffiCall as _TestLibffiCall
-from pypy.rlib.unroll import unrolling_iterable
-from pypy.rpython.lltypesystem import lltype, rffi
-from pypy.tool.sourcetools import func_with_new_name
+def get_description(atypes, rtype):
+ p = lltype.malloc(CIF_DESCRIPTION, len(atypes),
+ flavor='raw', immortal=True)
+ rffi.setintfield(p, 'abi', 42)
+ p.nargs = len(atypes)
+ p.rtype = rtype
+ p.atypes = lltype.malloc(FFI_TYPE_PP.TO, len(atypes),
+ flavor='raw', immortal=True)
+ for i in range(len(atypes)):
+ p.atypes[i] = atypes[i]
+ return p
-class FfiCallTests(_TestLibffiCall):
- # ===> ../../../rlib/test/test_libffi.py
+ at jit.oopspec("libffi_call(cif_description, func_addr, exchange_buffer)")
+def fake_call(cif_description, func_addr, exchange_buffer):
+ assert rffi.cast(lltype.Signed, func_addr) == 123
+ assert rffi.cast(rffi.SIGNEDP, exchange_buffer)[0] == 456
+ assert rffi.cast(rffi.SIGNEDP, exchange_buffer)[1] == 789
+ rffi.cast(rffi.SIGNEDP, exchange_buffer)[2] = -42
- def call(self, funcspec, args, RESULT, is_struct=False, jitif=[]):
- """
- Call the function specified by funcspec in a loop, and let the jit to
- see and optimize it.
- """
- #
- lib, name, argtypes, restype = funcspec
- method_and_args = []
- for argval in args:
- if isinstance(argval, tuple):
- method_name, argval = argval
- else:
- method_name = 'arg'
- method_and_args.append((method_name, argval))
- method_and_args = unrolling_iterable(method_and_args)
- #
- reds = ['n', 'res', 'func']
- if (RESULT is rffi.DOUBLE or
- IS_32_BIT and RESULT in [rffi.LONGLONG, rffi.ULONGLONG]):
- reds = ['n', 'func', 'res'] # 'double' floats must be *after* refs
- driver = JitDriver(reds=reds, greens=[])
- init_result = rffi.cast(RESULT, 0)
- #
- def g(func):
- # a different function, which is marked as "dont_look_inside"
- # in case it uses an unsupported argument
- argchain = ArgChain()
- # this loop is unrolled
- for method_name, argval in method_and_args:
- getattr(argchain, method_name)(argval)
- return func.call(argchain, RESULT, is_struct=is_struct)
- #
- def f(n):
- func = lib.getpointer(name, argtypes, restype)
- res = init_result
- while n < 10:
- driver.jit_merge_point(n=n, res=res, func=func)
- promote(func)
- res = g(func)
- n += 1
+
+class FfiCallTests(object):
+
+ def test_call_simple(self):
+ cif_description = get_description([types.signed]*2, types.signed)
+ func_addr = rffi.cast(rffi.VOIDP, 123)
+ SIZE_SIGNED = rffi.sizeof(rffi.SIGNED)
+ def f(n, m):
+ exbuf = lltype.malloc(rffi.CCHARP.TO, 24, flavor='raw', zero=True)
+ rffi.cast(rffi.SIGNEDP, exbuf)[0] = n
+ data = rffi.ptradd(exbuf, SIZE_SIGNED)
+ rffi.cast(rffi.SIGNEDP, data)[0] = m
+ fake_call(cif_description, func_addr, exbuf)
+ data = rffi.ptradd(exbuf, 2 * SIZE_SIGNED)
+ res = rffi.cast(rffi.SIGNEDP, data)[0]
+ lltype.free(exbuf, flavor='raw')
return res
- #
- res = self.meta_interp(f, [0], backendopt=True,
- supports_floats = self.supports_all,
- supports_longlong = self.supports_all,
- supports_singlefloats = self.supports_all)
- d = {'floats': self.supports_all,
- 'longlong': self.supports_all or not IS_32_BIT,
- 'singlefloats': self.supports_all,
- 'byval': False}
- supported = all(d[check] for check in jitif)
- if supported:
- self.check_resops(
- call_release_gil=2, # a CALL_RELEASE_GIL, and no other CALLs
- call=0,
- call_may_force=0,
- guard_no_exception=2,
- guard_not_forced=2,
- int_add=2,
- int_lt=2,
- guard_true=2,
- jump=1)
- else:
- self.check_resops(
- call_release_gil=0, # no CALL_RELEASE_GIL
- int_add=2,
- int_lt=2,
- guard_true=2,
- jump=1)
- return res
- def test_byval_result(self):
- _TestLibffiCall.test_byval_result(self)
- test_byval_result.__doc__ = _TestLibffiCall.test_byval_result.__doc__
- test_byval_result.dont_track_allocations = True
-
-class FfiLookupTests(object):
- def test_array_fields(self):
- myjitdriver = JitDriver(
- greens = [],
- reds = ["n", "i", "points", "result_point"],
- )
-
- POINT = lltype.Struct("POINT",
- ("x", lltype.Signed),
- ("y", lltype.Signed),
- )
- def f(points, result_point, n):
- i = 0
- while i < n:
- myjitdriver.jit_merge_point(i=i, points=points, n=n,
- result_point=result_point)
- x = array_getitem(
- types.slong, rffi.sizeof(lltype.Signed) * 2, points, i, 0
- )
- y = array_getitem(
- types.slong, rffi.sizeof(lltype.Signed) * 2, points, i, rffi.sizeof(lltype.Signed)
- )
-
- cur_x = array_getitem(
- types.slong, rffi.sizeof(lltype.Signed) * 2, result_point, 0, 0
- )
- cur_y = array_getitem(
- types.slong, rffi.sizeof(lltype.Signed) * 2, result_point, 0, rffi.sizeof(lltype.Signed)
- )
-
- array_setitem(
- types.slong, rffi.sizeof(lltype.Signed) * 2, result_point, 0, 0, cur_x + x
- )
- array_setitem(
- types.slong, rffi.sizeof(lltype.Signed) * 2, result_point, 0, rffi.sizeof(lltype.Signed), cur_y + y
- )
- i += 1
-
- def main(n):
- with lltype.scoped_alloc(rffi.CArray(POINT), n) as points:
- with lltype.scoped_alloc(rffi.CArray(POINT), 1) as result_point:
- for i in xrange(n):
- points[i].x = i * 2
- points[i].y = i * 2 + 1
- points = rffi.cast(rffi.CArrayPtr(lltype.Char), points)
- result_point[0].x = 0
- result_point[0].y = 0
- result_point = rffi.cast(rffi.CArrayPtr(lltype.Char), result_point)
- f(points, result_point, n)
- result_point = rffi.cast(rffi.CArrayPtr(POINT), result_point)
- return result_point[0].x * result_point[0].y
-
- assert self.meta_interp(main, [10]) == main(10) == 9000
- self.check_resops({'jump': 1, 'int_lt': 2, 'setinteriorfield_raw': 4,
- 'getinteriorfield_raw': 8, 'int_add': 6, 'guard_true': 2})
-
- def _test_getitem_type(self, TYPE, ffitype, COMPUTE_TYPE):
- reds = ["n", "i", "s", "data"]
- if COMPUTE_TYPE is lltype.Float:
- # Move the float var to the back.
- reds.remove("s")
- reds.append("s")
- myjitdriver = JitDriver(
- greens = [],
- reds = reds,
- )
- def f(data, n):
- i = 0
- s = rffi.cast(COMPUTE_TYPE, 0)
- while i < n:
- myjitdriver.jit_merge_point(n=n, i=i, s=s, data=data)
- s += rffi.cast(COMPUTE_TYPE, array_getitem(ffitype, rffi.sizeof(TYPE), data, 0, 0))
- i += 1
- return s
- def main(n):
- with lltype.scoped_alloc(rffi.CArray(TYPE), 1) as data:
- data[0] = rffi.cast(TYPE, 200)
- return f(data, n)
- assert self.meta_interp(main, [10]) == 2000
-
- def test_array_getitem_uint8(self):
- self._test_getitem_type(rffi.UCHAR, types.uchar, lltype.Signed)
- self.check_resops({'jump': 1, 'int_lt': 2, 'getinteriorfield_raw': 2,
- 'guard_true': 2, 'int_add': 4})
-
- def test_array_getitem_float(self):
- self._test_getitem_type(rffi.FLOAT, types.float, lltype.Float)
+ res = f(456, 789)
+ assert res == -42
+ res = self.interp_operations(f, [456, 789])
+ assert res == -42
class TestFfiCall(FfiCallTests, LLJitMixin):
- supports_all = False
-
-class TestFfiCallSupportAll(FfiCallTests, LLJitMixin):
- supports_all = True # supports_{floats,longlong,singlefloats}
-
- def test_struct_getfield(self):
- myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'addr'])
-
- def f(n):
- i = 0
- addr = lltype.malloc(rffi.VOIDP.TO, 10, flavor='raw')
- while i < n:
- myjitdriver.jit_merge_point(n=n, i=i, addr=addr)
- struct_setfield_int(types.slong, addr, 0, 1)
- i += struct_getfield_int(types.slong, addr, 0)
- lltype.free(addr, flavor='raw')
- return i
- assert self.meta_interp(f, [20]) == f(20)
- self.check_resops(
- setfield_raw=2,
- getfield_raw=2,
- call=0)
-
-
-class TestFfiLookup(FfiLookupTests, LLJitMixin):
pass
diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py
--- a/pypy/rlib/jit.py
+++ b/pypy/rlib/jit.py
@@ -402,7 +402,7 @@
"""Inconsistency in the JIT hints."""
ENABLE_ALL_OPTS = (
- 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:ffi:unroll')
+ 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll')
PARAMETER_DOCS = {
'threshold': 'number of times a loop has to run for it to become hot',
More information about the pypy-commit
mailing list