[pypy-commit] pypy py3k: merge default
pjenvey
pypy.commits at gmail.com
Sun Oct 9 20:37:20 EDT 2016
Author: Philip Jenvey <pjenvey at underboss.org>
Branch: py3k
Changeset: r87678:ce7a5c1077b4
Date: 2016-10-09 17:35 -0700
http://bitbucket.org/pypy/pypy/changeset/ce7a5c1077b4/
Log: merge default
diff too long, truncating to 2000 out of 2071 lines
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -67,3 +67,8 @@
Change the ``timeit`` module: it now prints the average time and the standard
deviation over 7 runs by default, instead of the minimum. The minimum is often
misleading.
+
+.. branch: unrecursive-opt
+
Make optimizeopt iterative instead of recursive so it can be reasoned about
+more easily and debugging is faster.
diff --git a/pypy/module/faulthandler/faulthandler.c b/pypy/module/faulthandler/faulthandler.c
--- a/pypy/module/faulthandler/faulthandler.c
+++ b/pypy/module/faulthandler/faulthandler.c
@@ -323,7 +323,7 @@
faulthandler_register(int signum, int chain, _Py_sighandler_t *p_previous)
{
struct sigaction action;
- action.sa_handler = faulthandler_user;
+ action.sa_sigaction = faulthandler_user;
sigemptyset(&action.sa_mask);
/* if the signal is received while the kernel is executing a system
call, try to restart the system call instead of interrupting it and
diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py
--- a/rpython/jit/backend/tool/viewcode.py
+++ b/rpython/jit/backend/tool/viewcode.py
@@ -28,7 +28,7 @@
pass
def find_objdump():
- exe = ('objdump', 'gobjdump')
+ exe = ('objdump', 'gobjdump', 'objdump.exe')
path = os.environ['PATH'].split(os.pathsep)
for e in exe:
for p in path:
diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py
--- a/rpython/jit/metainterp/optimizeopt/earlyforce.py
+++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py
@@ -26,7 +26,7 @@
for arg in op.getarglist():
self.optimizer.force_box(arg, self)
- self.emit_operation(op)
+ return self.emit(op)
def setup(self):
self.optimizer.optearlyforce = self
diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py
--- a/rpython/jit/metainterp/optimizeopt/heap.py
+++ b/rpython/jit/metainterp/optimizeopt/heap.py
@@ -128,7 +128,7 @@
if a is optheap.postponed_op:
optheap.emit_postponed_op()
break
- optheap.next_optimization.propagate_forward(op)
+ optheap.emit_extra(op, emit=False)
if not can_cache:
return
# Once it is done, we can put at least one piece of information
@@ -259,7 +259,7 @@
if self.postponed_op:
postponed_op = self.postponed_op
self.postponed_op = None
- self.next_optimization.propagate_forward(postponed_op)
+ self.emit_extra(postponed_op, emit=False)
def produce_potential_short_preamble_ops(self, sb):
descrkeys = self.cached_fields.keys()
@@ -312,7 +312,7 @@
cf = submap[index] = ArrayCachedItem(index)
return cf
- def emit_operation(self, op):
+ def emit(self, op):
self.emitting_operation(op)
self.emit_postponed_op()
opnum = op.opnum
@@ -320,7 +320,7 @@
or rop.is_ovf(opnum)):
self.postponed_op = op
else:
- Optimization.emit_operation(self, op)
+ return Optimization.emit(self, op)
def emitting_operation(self, op):
if rop.has_no_side_effect(op.opnum):
@@ -370,7 +370,7 @@
if oopspecindex == EffectInfo.OS_DICT_LOOKUP:
if self._optimize_CALL_DICT_LOOKUP(op):
return
- self.emit_operation(op)
+ return self.emit(op)
optimize_CALL_F = optimize_CALL_I
optimize_CALL_R = optimize_CALL_I
optimize_CALL_N = optimize_CALL_I
@@ -428,7 +428,7 @@
def optimize_GUARD_NO_EXCEPTION(self, op):
if self.last_emitted_operation is REMOVED:
return
- self.emit_operation(op)
+ return self.emit(op)
optimize_GUARD_EXCEPTION = optimize_GUARD_NO_EXCEPTION
@@ -543,12 +543,21 @@
return
# default case: produce the operation
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ # return self.emit(op)
+ return self.emit(op)
+
+ def postprocess_GETFIELD_GC_I(self, op):
# then remember the result of reading the field
- structinfo.setfield(descr, op.getarg(0), op, optheap=self, cf=cf)
+ structinfo = self.ensure_ptr_info_arg0(op)
+ cf = self.field_cache(op.getdescr())
+ structinfo.setfield(op.getdescr(), op.getarg(0), op, optheap=self,
+ cf=cf)
optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I
optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I
+ postprocess_GETFIELD_GC_R = postprocess_GETFIELD_GC_I
+ postprocess_GETFIELD_GC_F = postprocess_GETFIELD_GC_I
+
def optimize_SETFIELD_GC(self, op):
self.setfield(op)
#opnum = OpHelpers.getfield_pure_for_descr(op.getdescr())
@@ -582,9 +591,16 @@
self.getintbound(op.getarg(1)))
# default case: produce the operation
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ # return self.emit(op)
+ return self.emit(op)
+
+ def postprocess_GETARRAYITEM_GC_I(self, op):
# then remember the result of reading the array item
- if cf is not None:
+ arrayinfo = self.ensure_ptr_info_arg0(op)
+ indexb = self.getintbound(op.getarg(1))
+ if indexb.is_constant():
+ index = indexb.getint()
+ cf = self.arrayitem_cache(op.getdescr(), index)
arrayinfo.setitem(op.getdescr(), indexb.getint(),
self.get_box_replacement(op.getarg(0)),
self.get_box_replacement(op), optheap=self,
@@ -592,6 +608,9 @@
optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I
optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I
+ postprocess_GETARRAYITEM_GC_R = postprocess_GETARRAYITEM_GC_I
+ postprocess_GETARRAYITEM_GC_F = postprocess_GETARRAYITEM_GC_I
+
def optimize_GETARRAYITEM_GC_PURE_I(self, op):
arrayinfo = self.ensure_ptr_info_arg0(op)
indexb = self.getintbound(op.getarg(1))
@@ -610,7 +629,7 @@
self.force_lazy_setarrayitem(op.getdescr(), self.getintbound(op.getarg(1)))
# default case: produce the operation
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ return self.emit(op)
optimize_GETARRAYITEM_GC_PURE_R = optimize_GETARRAYITEM_GC_PURE_I
optimize_GETARRAYITEM_GC_PURE_F = optimize_GETARRAYITEM_GC_PURE_I
@@ -634,7 +653,7 @@
# variable index, so make sure the lazy setarrayitems are done
self.force_lazy_setarrayitem(op.getdescr(), indexb, can_cache=False)
# and then emit the operation
- self.emit_operation(op)
+ return self.emit(op)
def optimize_QUASIIMMUT_FIELD(self, op):
# Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr)
@@ -672,9 +691,11 @@
if self._seen_guard_not_invalidated:
return
self._seen_guard_not_invalidated = True
- self.emit_operation(op)
+ return self.emit(op)
dispatch_opt = make_dispatcher_method(OptHeap, 'optimize_',
- default=OptHeap.emit_operation)
+ default=OptHeap.emit)
OptHeap.propagate_forward = dispatch_opt
+dispatch_postprocess = make_dispatcher_method(OptHeap, 'postprocess_')
+OptHeap.propagate_postprocess = dispatch_postprocess
diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py
--- a/rpython/jit/metainterp/optimizeopt/info.py
+++ b/rpython/jit/metainterp/optimizeopt/info.py
@@ -142,7 +142,7 @@
return constptr
#
op.set_forwarded(None)
- optforce.emit_operation(op)
+ optforce.emit_extra(op)
newop = optforce.getlastop()
if newop is not op:
op.set_forwarded(newop)
@@ -220,7 +220,7 @@
setfieldop = ResOperation(rop.SETFIELD_GC, [op, subbox],
descr=fielddescr)
self._fields[i] = None
- optforce.emit_operation(setfieldop)
+ optforce.emit_extra(setfieldop)
def _force_at_the_end_of_preamble(self, op, optforce, rec):
if self._fields is None:
@@ -412,7 +412,7 @@
# 'op = CALL_I(..., OS_RAW_MALLOC_VARSIZE_CHAR)'.
# Emit now a CHECK_MEMORY_ERROR resop.
check_op = ResOperation(rop.CHECK_MEMORY_ERROR, [op])
- optforce.emit_operation(check_op)
+ optforce.emit_extra(check_op)
#
buffer = self._get_buffer()
for i in range(len(buffer.offsets)):
@@ -422,7 +422,7 @@
itembox = buffer.values[i]
setfield_op = ResOperation(rop.RAW_STORE,
[op, ConstInt(offset), itembox], descr=descr)
- optforce.emit_operation(setfield_op)
+ optforce.emit_extra(setfield_op)
def _visitor_walk_recursive(self, op, visitor, optimizer):
itemboxes = [optimizer.get_box_replacement(box)
@@ -537,7 +537,7 @@
[op, ConstInt(i), subbox],
descr=descr)
self._items[i] = None
- optforce.emit_operation(setop)
+ optforce.emit_extra(setop)
optforce.pure_from_args(rop.ARRAYLEN_GC, [op], ConstInt(len(self._items)))
def setitem(self, descr, index, struct, op, optheap=None, cf=None):
@@ -651,7 +651,7 @@
setfieldop = ResOperation(rop.SETINTERIORFIELD_GC,
[op, ConstInt(index), subbox],
descr=fielddescr)
- optforce.emit_operation(setfieldop)
+ optforce.emit_extra(setfieldop)
# heapcache does not work for interiorfields
# if it does, we would need a fix here
i += 1
diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py
--- a/rpython/jit/metainterp/optimizeopt/intbounds.py
+++ b/rpython/jit/metainterp/optimizeopt/intbounds.py
@@ -44,11 +44,10 @@
redundant guards"""
def propagate_forward(self, op):
- dispatch_opt(self, op)
+ return dispatch_opt(self, op)
- def opt_default(self, op):
- assert not op.is_ovf()
- self.emit_operation(op)
+ def propagate_postprocess(self, op):
+ return dispatch_postprocess(self, op)
def propagate_bounds_backward(self, box):
# FIXME: This takes care of the instruction where box is the result
@@ -62,7 +61,9 @@
dispatch_bounds_ops(self, box)
def _optimize_guard_true_false_value(self, op):
- self.emit_operation(op)
+ return self.emit(op)
+
+ def _postprocess_guard_true_false_value(self, op):
if op.getarg(0).type == 'i':
self.propagate_bounds_backward(op.getarg(0))
@@ -70,18 +71,26 @@
optimize_GUARD_FALSE = _optimize_guard_true_false_value
optimize_GUARD_VALUE = _optimize_guard_true_false_value
+ postprocess_GUARD_TRUE = _postprocess_guard_true_false_value
+ postprocess_GUARD_FALSE = _postprocess_guard_true_false_value
+ postprocess_GUARD_VALUE = _postprocess_guard_true_false_value
+
def optimize_INT_OR_or_XOR(self, op):
v1 = self.get_box_replacement(op.getarg(0))
- b1 = self.getintbound(v1)
v2 = self.get_box_replacement(op.getarg(1))
- b2 = self.getintbound(v2)
if v1 is v2:
if op.getopnum() == rop.INT_OR:
self.make_equal_to(op, v1)
else:
self.make_constant_int(op, 0)
- return
- self.emit_operation(op)
+ return None
+ return self.emit(op)
+
+ def postprocess_INT_OR_or_XOR(self, op):
+ v1 = self.get_box_replacement(op.getarg(0))
+ b1 = self.getintbound(v1)
+ v2 = self.get_box_replacement(op.getarg(1))
+ b2 = self.getintbound(v2)
if b1.known_ge(IntBound(0, 0)) and \
b2.known_ge(IntBound(0, 0)):
r = self.getintbound(op)
@@ -91,11 +100,15 @@
optimize_INT_OR = optimize_INT_OR_or_XOR
optimize_INT_XOR = optimize_INT_OR_or_XOR
+ postprocess_INT_OR = postprocess_INT_OR_or_XOR
+ postprocess_INT_XOR = postprocess_INT_OR_or_XOR
+
def optimize_INT_AND(self, op):
+ return self.emit(op)
+
+ def postprocess_INT_AND(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
- self.emit_operation(op)
-
r = self.getintbound(op)
pos1 = b1.known_ge(IntBound(0, 0))
pos2 = b2.known_ge(IntBound(0, 0))
@@ -107,7 +120,9 @@
r.make_le(b2)
def optimize_INT_SUB(self, op):
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_INT_SUB(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
b = b1.sub_bound(b2)
@@ -118,8 +133,7 @@
arg1 = self.get_box_replacement(op.getarg(0))
arg2 = self.get_box_replacement(op.getarg(1))
if self.is_raw_ptr(arg1) or self.is_raw_ptr(arg2):
- self.emit_operation(op)
- return
+ return self.emit(op)
v1 = self.getintbound(arg1)
v2 = self.getintbound(arg2)
@@ -153,7 +167,9 @@
arg2 = ConstInt(sum)
op = self.replace_op_with(op, rop.INT_ADD, args=[arg1, arg2])
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_INT_ADD(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
r = self.getintbound(op)
@@ -162,38 +178,37 @@
r.intersect(b)
def optimize_INT_MUL(self, op):
+ return self.emit(op)
+
+ def postprocess_INT_MUL(self, op):
b1 = self.getintbound(op.getarg(0))
b2 = self.getintbound(op.getarg(1))
- self.emit_operation(op)
r = self.getintbound(op)
b = b1.mul_bound(b2)
if b.bounded():
r.intersect(b)
def optimize_CALL_PURE_I(self, op):
+ return self.emit(op)
+
+ def postprocess_CALL_PURE_I(self, op):
# dispatch based on 'oopspecindex' to a method that handles
# specifically the given oopspec call.
effectinfo = op.getdescr().get_extra_info()
oopspecindex = effectinfo.oopspecindex
if oopspecindex == EffectInfo.OS_INT_PY_DIV:
- self.opt_call_INT_PY_DIV(op)
- return
+ self.post_call_INT_PY_DIV(op)
elif oopspecindex == EffectInfo.OS_INT_PY_MOD:
- self.opt_call_INT_PY_MOD(op)
- return
- self.emit_operation(op)
+ self.post_call_INT_PY_MOD(op)
- def opt_call_INT_PY_DIV(self, op):
+ def post_call_INT_PY_DIV(self, op):
b1 = self.getintbound(op.getarg(1))
b2 = self.getintbound(op.getarg(2))
- self.emit_operation(op)
r = self.getintbound(op)
r.intersect(b1.py_div_bound(b2))
- def opt_call_INT_PY_MOD(self, op):
- b1 = self.getintbound(op.getarg(1))
+ def post_call_INT_PY_MOD(self, op):
b2 = self.getintbound(op.getarg(2))
- self.emit_operation(op)
if b2.is_constant():
val = b2.getint()
r = self.getintbound(op)
@@ -205,11 +220,13 @@
r.make_le(IntBound(0, 0))
def optimize_INT_LSHIFT(self, op):
+ return self.emit(op)
+
+ def postprocess_INT_LSHIFT(self, op):
arg0 = self.get_box_replacement(op.getarg(0))
b1 = self.getintbound(arg0)
arg1 = self.get_box_replacement(op.getarg(1))
b2 = self.getintbound(arg1)
- self.emit_operation(op)
r = self.getintbound(op)
b = b1.lshift_bound(b2)
r.intersect(b)
@@ -228,10 +245,15 @@
if b.has_lower and b.has_upper and b.lower == b.upper:
# constant result (likely 0, for rshifts that kill all bits)
self.make_constant_int(op, b.lower)
- else:
- self.emit_operation(op)
- r = self.getintbound(op)
- r.intersect(b)
+ return None
+ return self.emit(op)
+
+ def postprocess_INT_RSHIFT(self, op):
+ b1 = self.getintbound(op.getarg(0))
+ b2 = self.getintbound(op.getarg(1))
+ b = b1.rshift_bound(b2)
+ r = self.getintbound(op)
+ r.intersect(b)
def optimize_GUARD_NO_OVERFLOW(self, op):
lastop = self.last_emitted_operation
@@ -257,7 +279,7 @@
self.pure_from_args(rop.INT_SUB, [args[0], result], args[1])
#elif opnum == rop.INT_MUL_OVF:
# self.pure(rop.INT_MUL, args[:], result)
- self.emit_operation(op)
+ return self.emit(op)
def optimize_GUARD_OVERFLOW(self, op):
# If INT_xxx_OVF was replaced by INT_xxx, *but* we still see
@@ -270,7 +292,7 @@
raise InvalidLoop('An INT_xxx_OVF was proven not to overflow but' +
'guarded with GUARD_OVERFLOW')
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_ADD_OVF(self, op):
b1 = self.getintbound(op.getarg(0))
@@ -281,7 +303,12 @@
# by optimize_GUARD_NO_OVERFLOW; if we see instead an
# optimize_GUARD_OVERFLOW, then InvalidLoop.
op = self.replace_op_with(op, rop.INT_ADD)
- self.emit_operation(op) # emit the op
+ return self.emit(op)
+
+ def postprocess_INT_ADD_OVF(self, op):
+ b1 = self.getintbound(op.getarg(0))
+ b2 = self.getintbound(op.getarg(1))
+ resbound = b1.add_bound(b2)
r = self.getintbound(op)
r.intersect(resbound)
@@ -292,11 +319,18 @@
b1 = self.getintbound(arg1)
if arg0.same_box(arg1):
self.make_constant_int(op, 0)
- return
+ return None
resbound = b0.sub_bound(b1)
if resbound.bounded():
op = self.replace_op_with(op, rop.INT_SUB)
- self.emit_operation(op) # emit the op
+ return self.emit(op)
+
+ def postprocess_INT_SUB_OVF(self, op):
+ arg0 = self.get_box_replacement(op.getarg(0))
+ arg1 = self.get_box_replacement(op.getarg(1))
+ b0 = self.getintbound(arg0)
+ b1 = self.getintbound(arg1)
+ resbound = b0.sub_bound(b1)
r = self.getintbound(op)
r.intersect(resbound)
@@ -306,7 +340,12 @@
resbound = b1.mul_bound(b2)
if resbound.bounded():
op = self.replace_op_with(op, rop.INT_MUL)
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_INT_MUL_OVF(self, op):
+ b1 = self.getintbound(op.getarg(0))
+ b2 = self.getintbound(op.getarg(1))
+ resbound = b1.mul_bound(b2)
r = self.getintbound(op)
r.intersect(resbound)
@@ -320,7 +359,7 @@
elif b1.known_ge(b2) or arg1 is arg2:
self.make_constant_int(op, 0)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_GT(self, op):
arg1 = self.get_box_replacement(op.getarg(0))
@@ -332,7 +371,7 @@
elif b1.known_le(b2) or arg1 is arg2:
self.make_constant_int(op, 0)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_LE(self, op):
arg1 = self.get_box_replacement(op.getarg(0))
@@ -344,7 +383,7 @@
elif b1.known_gt(b2):
self.make_constant_int(op, 0)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_GE(self, op):
arg1 = self.get_box_replacement(op.getarg(0))
@@ -356,7 +395,7 @@
elif b1.known_lt(b2):
self.make_constant_int(op, 0)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_EQ(self, op):
arg0 = self.get_box_replacement(op.getarg(0))
@@ -370,7 +409,7 @@
elif arg0.same_box(arg1):
self.make_constant_int(op, 1)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_NE(self, op):
arg0 = self.get_box_replacement(op.getarg(0))
@@ -384,14 +423,14 @@
elif arg0 is arg1:
self.make_constant_int(op, 0)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_FORCE_GE_ZERO(self, op):
b = self.getintbound(op.getarg(0))
if b.known_ge(IntBound(0, 0)):
self.make_equal_to(op, op.getarg(0))
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_SIGNEXT(self, op):
b = self.getintbound(op.getarg(0))
@@ -402,29 +441,43 @@
if bounds.contains_bound(b):
self.make_equal_to(op, op.getarg(0))
else:
- self.emit_operation(op)
- bres = self.getintbound(op)
- bres.intersect(bounds)
+ return self.emit(op)
+
+ def postprocess_INT_SIGNEXT(self, op):
+ numbits = op.getarg(1).getint() * 8
+ start = -(1 << (numbits - 1))
+ stop = 1 << (numbits - 1)
+ bounds = IntBound(start, stop - 1)
+ bres = self.getintbound(op)
+ bres.intersect(bounds)
def optimize_ARRAYLEN_GC(self, op):
+ return self.emit(op)
+
+ def postprocess_ARRAYLEN_GC(self, op):
array = self.ensure_ptr_info_arg0(op)
- self.emit_operation(op)
self.optimizer.setintbound(op, array.getlenbound(None))
def optimize_STRLEN(self, op):
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_STRLEN(self, op):
self.make_nonnull_str(op.getarg(0), vstring.mode_string)
array = self.getptrinfo(op.getarg(0))
self.optimizer.setintbound(op, array.getlenbound(vstring.mode_string))
def optimize_UNICODELEN(self, op):
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_UNICODELEN(self, op):
self.make_nonnull_str(op.getarg(0), vstring.mode_unicode)
array = self.getptrinfo(op.getarg(0))
self.optimizer.setintbound(op, array.getlenbound(vstring.mode_unicode))
def optimize_STRGETITEM(self, op):
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_STRGETITEM(self, op):
v1 = self.getintbound(op)
v2 = self.getptrinfo(op.getarg(0))
intbound = self.getintbound(op.getarg(1))
@@ -436,7 +489,9 @@
v1.make_lt(IntUpperBound(256))
def optimize_GETFIELD_RAW_I(self, op):
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_GETFIELD_RAW_I(self, op):
descr = op.getdescr()
if descr.is_integer_bounded():
b1 = self.getintbound(op)
@@ -449,12 +504,24 @@
optimize_GETFIELD_GC_R = optimize_GETFIELD_RAW_I
optimize_GETFIELD_GC_F = optimize_GETFIELD_RAW_I
+ postprocess_GETFIELD_RAW_F = postprocess_GETFIELD_RAW_I
+ postprocess_GETFIELD_RAW_R = postprocess_GETFIELD_RAW_I
+ postprocess_GETFIELD_GC_I = postprocess_GETFIELD_RAW_I
+ postprocess_GETFIELD_GC_R = postprocess_GETFIELD_RAW_I
+ postprocess_GETFIELD_GC_F = postprocess_GETFIELD_RAW_I
+
optimize_GETINTERIORFIELD_GC_I = optimize_GETFIELD_RAW_I
optimize_GETINTERIORFIELD_GC_R = optimize_GETFIELD_RAW_I
optimize_GETINTERIORFIELD_GC_F = optimize_GETFIELD_RAW_I
+ postprocess_GETINTERIORFIELD_GC_I = postprocess_GETFIELD_RAW_I
+ postprocess_GETINTERIORFIELD_GC_R = postprocess_GETFIELD_RAW_I
+ postprocess_GETINTERIORFIELD_GC_F = postprocess_GETFIELD_RAW_I
+
def optimize_GETARRAYITEM_RAW_I(self, op):
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_GETARRAYITEM_RAW_I(self, op):
descr = op.getdescr()
if descr and descr.is_item_integer_bounded():
intbound = self.getintbound(op)
@@ -466,8 +533,15 @@
optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_RAW_I
optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_RAW_I
+ postprocess_GETARRAYITEM_RAW_F = postprocess_GETARRAYITEM_RAW_I
+ postprocess_GETARRAYITEM_GC_I = postprocess_GETARRAYITEM_RAW_I
+ postprocess_GETARRAYITEM_GC_F = postprocess_GETARRAYITEM_RAW_I
+ postprocess_GETARRAYITEM_GC_R = postprocess_GETARRAYITEM_RAW_I
+
def optimize_UNICODEGETITEM(self, op):
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_UNICODEGETITEM(self, op):
b1 = self.getintbound(op)
b1.make_ge(IntLowerBound(0))
v2 = self.getptrinfo(op.getarg(0))
@@ -632,5 +706,6 @@
dispatch_opt = make_dispatcher_method(OptIntBounds, 'optimize_',
- default=OptIntBounds.opt_default)
+ default=OptIntBounds.emit)
dispatch_bounds_ops = make_dispatcher_method(OptIntBounds, 'propagate_bounds_')
+dispatch_postprocess = make_dispatcher_method(OptIntBounds, 'postprocess_')
diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py
--- a/rpython/jit/metainterp/optimizeopt/optimizer.py
+++ b/rpython/jit/metainterp/optimizeopt/optimizer.py
@@ -41,6 +41,15 @@
pass
+class OptimizationResult(object):
+ def __init__(self, opt, op):
+ self.opt = opt
+ self.op = op
+
+ def callback(self):
+ self.opt.propagate_postprocess(self.op)
+
+
class Optimization(object):
next_optimization = None
potential_extra_ops = None
@@ -48,15 +57,29 @@
def __init__(self):
pass # make rpython happy
- def send_extra_operation(self, op):
- self.optimizer.send_extra_operation(op)
+ def send_extra_operation(self, op, opt=None):
+ self.optimizer.send_extra_operation(op, opt)
def propagate_forward(self, op):
raise NotImplementedError
+ def propagate_postprocess(self, op):
+ pass
+
def emit_operation(self, op):
- self.last_emitted_operation = op
- self.next_optimization.propagate_forward(op)
+ assert False, "This should never be called."
+
+ def emit(self, op):
+ return self.emit_result(OptimizationResult(self, op))
+
+ def emit_result(self, opt_result):
+ self.last_emitted_operation = opt_result.op
+ return opt_result
+
+ def emit_extra(self, op, emit=True):
+ if emit:
+ self.emit(op)
+ self.send_extra_operation(op, self.next_optimization)
def getintbound(self, op):
assert op.type == 'i'
@@ -290,7 +313,7 @@
optimizations = []
self.first_optimization = self
- self.optimizations = optimizations
+ self.optimizations = optimizations
def force_op_from_preamble(self, op):
return op
@@ -524,7 +547,7 @@
if op.getopnum() in (rop.FINISH, rop.JUMP):
last_op = op
break
- self.first_optimization.propagate_forward(op)
+ self.send_extra_operation(op)
trace.kill_cache_at(deadranges[i + trace.start_index])
if op.type != 'v':
i += 1
@@ -532,9 +555,9 @@
if flush:
self.flush()
if last_op:
- self.first_optimization.propagate_forward(last_op)
+ self.send_extra_operation(last_op)
self.resumedata_memo.update_counters(self.metainterp_sd.profiler)
-
+
return (BasicLoopInfo(trace.inputargs, self.quasi_immutable_deps, last_op),
self._newoperations)
@@ -543,13 +566,30 @@
if op.get_forwarded() is not None:
op.set_forwarded(None)
- def send_extra_operation(self, op):
- self.first_optimization.propagate_forward(op)
+ def send_extra_operation(self, op, opt=None):
+ if opt is None:
+ opt = self.first_optimization
+ opt_results = []
+ while opt is not None:
+ opt_result = opt.propagate_forward(op)
+ if opt_result is None:
+ op = None
+ break
+ opt_results.append(opt_result)
+ op = opt_result.op
+ opt = opt.next_optimization
+ for opt_result in reversed(opt_results):
+ opt_result.callback()
def propagate_forward(self, op):
dispatch_opt(self, op)
- def emit_operation(self, op):
+ def emit_extra(self, op, emit=True):
+ # no forwarding, because we're at the end of the chain
+ self.emit(op)
+
+ def emit(self, op):
+ # this actually emits the operation instead of forwarding it
if rop.returns_bool_result(op.opnum):
self.getintbound(op).make_bool()
self._emit_operation(op)
@@ -740,7 +780,7 @@
return op
def optimize_default(self, op):
- self.emit_operation(op)
+ self.emit(op)
def constant_fold(self, op):
self.protect_speculative_operation(op)
@@ -885,14 +925,14 @@
#def optimize_GUARD_NO_OVERFLOW(self, op):
# # otherwise the default optimizer will clear fields, which is unwanted
# # in this case
- # self.emit_operation(op)
+ # self.emit(op)
# FIXME: Is this still needed?
def optimize_DEBUG_MERGE_POINT(self, op):
- self.emit_operation(op)
+ self.emit(op)
def optimize_JIT_DEBUG(self, op):
- self.emit_operation(op)
+ self.emit(op)
def optimize_STRGETITEM(self, op):
indexb = self.getintbound(op.getarg(1))
diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py
--- a/rpython/jit/metainterp/optimizeopt/pure.py
+++ b/rpython/jit/metainterp/optimizeopt/pure.py
@@ -1,4 +1,5 @@
-from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, REMOVED
+from rpython.jit.metainterp.optimizeopt.optimizer import (
+ Optimization, OptimizationResult, REMOVED)
from rpython.jit.metainterp.resoperation import rop, OpHelpers, AbstractResOp,\
ResOperation
from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method
@@ -6,6 +7,31 @@
from rpython.jit.metainterp.optimize import SpeculativeError
+class DefaultOptimizationResult(OptimizationResult):
+ def __init__(self, opt, op, save, nextop):
+ OptimizationResult.__init__(self, opt, op)
+ self.save = save
+ self.nextop = nextop
+
+ def callback(self):
+ self._callback(self.op, self.save, self.nextop)
+
+ def _callback(self, op, save, nextop):
+ if rop.returns_bool_result(op.opnum):
+ self.opt.getintbound(op).make_bool()
+ if save:
+ recentops = self.opt.getrecentops(op.getopnum())
+ recentops.add(op)
+ if nextop:
+ self.opt.emit_extra(nextop)
+
+
+class CallPureOptimizationResult(OptimizationResult):
+ def callback(self):
+ self.opt.call_pure_positions.append(
+ len(self.opt.optimizer._newoperations) - 1)
+
+
class RecentPureOps(object):
REMEMBER_LIMIT = 16
@@ -72,7 +98,10 @@
self.extra_call_pure = []
def propagate_forward(self, op):
- dispatch_opt(self, op)
+ return dispatch_opt(self, op)
+
+ def propagate_postprocess(self, op):
+ dispatch_postprocess(self, op)
def optimize_default(self, op):
canfold = rop.is_always_pure(op.opnum)
@@ -109,14 +138,7 @@
return
# otherwise, the operation remains
- self.emit_operation(op)
- if rop.returns_bool_result(op.opnum):
- self.getintbound(op).make_bool()
- if save:
- recentops = self.getrecentops(op.getopnum())
- recentops.add(op)
- if nextop:
- self.emit_operation(nextop)
+ return self.emit_result(DefaultOptimizationResult(self, op, save, nextop))
def getrecentops(self, opnum):
if rop._OVF_FIRST <= opnum <= rop._OVF_LAST:
@@ -159,9 +181,7 @@
# replace CALL_PURE with just CALL
opnum = OpHelpers.call_for_descr(op.getdescr())
newop = self.optimizer.replace_op_with(op, opnum)
- self.emit_operation(newop)
- self.call_pure_positions.append(
- len(self.optimizer._newoperations) - 1)
+ return self.emit_result(CallPureOptimizationResult(self, newop))
optimize_CALL_PURE_R = optimize_CALL_PURE_I
optimize_CALL_PURE_F = optimize_CALL_PURE_I
@@ -191,7 +211,7 @@
# it was a CALL_PURE that was killed; so we also kill the
# following GUARD_NO_EXCEPTION
return
- self.emit_operation(op)
+ return self.emit(op)
def flush(self):
assert self.postponed_op is None
@@ -237,3 +257,4 @@
dispatch_opt = make_dispatcher_method(OptPure, 'optimize_',
default=OptPure.optimize_default)
+dispatch_postprocess = make_dispatcher_method(OptPure, 'postprocess_')
diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py
--- a/rpython/jit/metainterp/optimizeopt/rewrite.py
+++ b/rpython/jit/metainterp/optimizeopt/rewrite.py
@@ -5,8 +5,8 @@
ConstFloat)
from rpython.jit.metainterp.optimize import InvalidLoop
from rpython.jit.metainterp.optimizeopt.intutils import IntBound
-from rpython.jit.metainterp.optimizeopt.optimizer import (Optimization, REMOVED,
- CONST_0, CONST_1)
+from rpython.jit.metainterp.optimizeopt.optimizer import (
+ Optimization, OptimizationResult, REMOVED, CONST_0, CONST_1)
from rpython.jit.metainterp.optimizeopt.info import INFO_NONNULL, INFO_NULL
from rpython.jit.metainterp.optimizeopt.util import _findall, make_dispatcher_method
from rpython.jit.metainterp.resoperation import rop, ResOperation, opclasses,\
@@ -16,6 +16,21 @@
from rpython.rtyper import rclass
import math
+
+class CallLoopinvariantOptimizationResult(OptimizationResult):
+ def __init__(self, opt, op, old_op):
+ OptimizationResult.__init__(self, opt, op)
+ self.old_op = old_op
+
+ def callback(self):
+ self._callback(self.op, self.old_op)
+
+ def _callback(self, op, old_op):
+ key = make_hashable_int(op.getarg(0).getint())
+ self.opt.loop_invariant_producer[key] = self.opt.optimizer.getlastop()
+ self.opt.loop_invariant_results[key] = old_op
+
+
class OptRewrite(Optimization):
"""Rewrite operations into equivalent, cheaper operations.
This includes already executed operations and constants.
@@ -36,7 +51,10 @@
if self.find_rewritable_bool(op):
return
- dispatch_opt(self, op)
+ return dispatch_opt(self, op)
+
+ def propagate_postprocess(self, op):
+ return dispatch_postprocess(self, op)
def try_boolinvers(self, op, targs):
oldop = self.get_pure_result(targs)
@@ -97,7 +115,7 @@
self.make_equal_to(op, op.getarg(1))
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_OR(self, op):
b1 = self.getintbound(op.getarg(0))
@@ -107,7 +125,7 @@
elif b2.equal(0):
self.make_equal_to(op, op.getarg(0))
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_SUB(self, op):
arg1 = self.get_box_replacement(op.getarg(0))
@@ -118,17 +136,18 @@
self.make_equal_to(op, arg1)
elif b1.equal(0):
op = self.replace_op_with(op, rop.INT_NEG, args=[arg2])
- self.emit_operation(op)
+ return self.emit(op)
elif arg1 == arg2:
self.make_constant_int(op, 0)
else:
- self.emit_operation(op)
- self.optimizer.pure_reverse(op)
+ return self.emit(op)
+
+ def postprocess_INT_SUB(self, op):
+ self.optimizer.pure_reverse(op)
def optimize_INT_ADD(self, op):
if self.is_raw_ptr(op.getarg(0)) or self.is_raw_ptr(op.getarg(1)):
- self.emit_operation(op)
- return
+ return self.emit(op)
arg1 = self.get_box_replacement(op.getarg(0))
b1 = self.getintbound(arg1)
arg2 = self.get_box_replacement(op.getarg(1))
@@ -140,8 +159,10 @@
elif b2.equal(0):
self.make_equal_to(op, arg1)
else:
- self.emit_operation(op)
- self.optimizer.pure_reverse(op)
+ return self.emit(op)
+
+ def postprocess_INT_ADD(self, op):
+ self.optimizer.pure_reverse(op)
def optimize_INT_MUL(self, op):
arg1 = self.get_box_replacement(op.getarg(0))
@@ -166,7 +187,7 @@
new_rhs = ConstInt(highest_bit(lh_info.getint()))
op = self.replace_op_with(op, rop.INT_LSHIFT, args=[rhs, new_rhs])
break
- self.emit_operation(op)
+ return self.emit(op)
def _optimize_CALL_INT_UDIV(self, op):
b2 = self.getintbound(op.getarg(2))
@@ -185,7 +206,7 @@
elif b1.is_constant() and b1.getint() == 0:
self.make_constant_int(op, 0)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_RSHIFT(self, op):
b1 = self.getintbound(op.getarg(0))
@@ -196,7 +217,7 @@
elif b1.is_constant() and b1.getint() == 0:
self.make_constant_int(op, 0)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_XOR(self, op):
b1 = self.getintbound(op.getarg(0))
@@ -207,7 +228,7 @@
elif b2.equal(0):
self.make_equal_to(op, op.getarg(0))
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_FLOAT_MUL(self, op):
arg1 = op.getarg(0)
@@ -225,9 +246,10 @@
return
elif v1.getfloat() == -1.0:
newop = self.replace_op_with(op, rop.FLOAT_NEG, args=[rhs])
- self.emit_operation(newop)
- return
- self.emit_operation(op)
+ return self.emit(newop)
+ return self.emit(op)
+
+ def postprocess_FLOAT_MUL(self, op):
self.optimizer.pure_reverse(op)
def optimize_FLOAT_TRUEDIV(self, op):
@@ -249,13 +271,15 @@
c = ConstFloat(longlong.getfloatstorage(reciprocal))
newop = self.replace_op_with(op, rop.FLOAT_MUL,
args=[arg1, c])
- self.emit_operation(newop)
+ return self.emit(newop)
def optimize_FLOAT_NEG(self, op):
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_FLOAT_NEG(self, op):
self.optimizer.pure_reverse(op)
- def optimize_guard(self, op, constbox, emit_operation=True):
+ def optimize_guard(self, op, constbox):
box = op.getarg(0)
if box.type == 'i':
intbound = self.getintbound(box)
@@ -275,12 +299,8 @@
raise InvalidLoop('A GUARD_VALUE (%s) '
'was proven to always fail' % r)
return
-
- if emit_operation:
- self.emit_operation(op)
- self.make_constant(box, constbox)
- #if self.optimizer.optheap: XXX
- # self.optimizer.optheap.value_updated(value, self.getvalue(constbox))
+
+ return self.emit(op)
def optimize_GUARD_ISNULL(self, op):
info = self.getptrinfo(op.getarg(0))
@@ -291,7 +311,9 @@
r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op)
raise InvalidLoop('A GUARD_ISNULL (%s) was proven to always '
'fail' % r)
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_GUARD_ISNULL(self, op):
self.make_constant(op.getarg(0), self.optimizer.cpu.ts.CONST_NULL)
def optimize_GUARD_IS_OBJECT(self, op):
@@ -308,7 +330,7 @@
return
if info.is_precise():
raise InvalidLoop()
- self.emit_operation(op)
+ return self.emit(op)
def optimize_GUARD_GC_TYPE(self, op):
info = self.getptrinfo(op.getarg(0))
@@ -322,7 +344,7 @@
if info.get_descr().get_type_id() != op.getarg(1).getint():
raise InvalidLoop("wrong GC types passed around!")
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_GUARD_SUBCLASS(self, op):
info = self.getptrinfo(op.getarg(0))
@@ -343,7 +365,7 @@
if optimizer._check_subclass(info.get_descr().get_vtable(),
op.getarg(1).getint()):
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_GUARD_NONNULL(self, op):
opinfo = self.getptrinfo(op.getarg(0))
@@ -354,7 +376,9 @@
r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op)
raise InvalidLoop('A GUARD_NONNULL (%s) was proven to always '
'fail' % r)
- self.emit_operation(op)
+ return self.emit(op)
+
+ def postprocess_GUARD_NONNULL(self, op):
self.make_nonnull(op.getarg(0))
self.getptrinfo(op.getarg(0)).mark_last_guard(self.optimizer)
@@ -375,7 +399,11 @@
return
constbox = op.getarg(1)
assert isinstance(constbox, Const)
- self.optimize_guard(op, constbox)
+ return self.optimize_guard(op, constbox)
+
+ def postprocess_GUARD_VALUE(self, op):
+ box = self.get_box_replacement(op.getarg(0))
+ self.make_constant(box, op.getarg(1))
def replace_old_guard_with_guard_value(self, op, info, old_guard_op):
# there already has been a guard_nonnull or guard_class or
@@ -418,10 +446,18 @@
return op
def optimize_GUARD_TRUE(self, op):
- self.optimize_guard(op, CONST_1)
+ return self.optimize_guard(op, CONST_1)
+
+ def postprocess_GUARD_TRUE(self, op):
+ box = self.get_box_replacement(op.getarg(0))
+ self.make_constant(box, CONST_1)
def optimize_GUARD_FALSE(self, op):
- self.optimize_guard(op, CONST_0)
+ return self.optimize_guard(op, CONST_0)
+
+ def postprocess_GUARD_FALSE(self, op):
+ box = self.get_box_replacement(op.getarg(0))
+ self.make_constant(box, CONST_0)
def optimize_RECORD_EXACT_CLASS(self, op):
opinfo = self.getptrinfo(op.getarg(0))
@@ -464,11 +500,16 @@
# not put in short preambles guard_nonnull and guard_class
# on the same box.
self.optimizer.replace_guard(op, info)
- self.emit_operation(op)
- self.make_constant_class(op.getarg(0), expectedclassbox, False)
- return
- self.emit_operation(op)
- self.make_constant_class(op.getarg(0), expectedclassbox)
+ return self.emit(op)
+ return self.emit(op)
+
+ def postprocess_GUARD_CLASS(self, op):
+ expectedclassbox = op.getarg(1)
+ info = self.getptrinfo(op.getarg(0))
+ old_guard_op = info.get_last_guard(self.optimizer)
+ update_last_guard = not old_guard_op or isinstance(
+ old_guard_op.getdescr(), compile.ResumeAtPositionDescr)
+ self.make_constant_class(op.getarg(0), expectedclassbox, update_last_guard)
def optimize_GUARD_NONNULL_CLASS(self, op):
info = self.getptrinfo(op.getarg(0))
@@ -476,7 +517,9 @@
r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op)
raise InvalidLoop('A GUARD_NONNULL_CLASS (%s) was proven to '
'always fail' % r)
- self.optimize_GUARD_CLASS(op)
+ return self.optimize_GUARD_CLASS(op)
+
+ postprocess_GUARD_NONNULL_CLASS = postprocess_GUARD_CLASS
def optimize_CALL_LOOPINVARIANT_I(self, op):
arg = op.getarg(0)
@@ -496,9 +539,8 @@
# there is no reason to have a separate operation for this
newop = self.replace_op_with(op,
OpHelpers.call_for_descr(op.getdescr()))
- self.emit_operation(newop)
- self.loop_invariant_producer[key] = self.optimizer.getlastop()
- self.loop_invariant_results[key] = op
+ return self.emit_result(CallLoopinvariantOptimizationResult(self, newop, op))
+
optimize_CALL_LOOPINVARIANT_R = optimize_CALL_LOOPINVARIANT_I
optimize_CALL_LOOPINVARIANT_F = optimize_CALL_LOOPINVARIANT_I
optimize_CALL_LOOPINVARIANT_N = optimize_CALL_LOOPINVARIANT_I
@@ -512,7 +554,7 @@
return
opnum = OpHelpers.call_for_type(op.type)
op = op.copy_and_change(opnum, args=op.getarglist()[1:])
- self.emit_operation(op)
+ return self.emit(op)
def _optimize_nullness(self, op, box, expect_nonnull):
info = self.getnullness(box)
@@ -521,17 +563,17 @@
elif info == INFO_NULL:
self.make_constant_int(op, not expect_nonnull)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_IS_TRUE(self, op):
if (not self.is_raw_ptr(op.getarg(0)) and
self.getintbound(op.getarg(0)).is_bool()):
self.make_equal_to(op, op.getarg(0))
return
- self._optimize_nullness(op, op.getarg(0), True)
+ return self._optimize_nullness(op, op.getarg(0), True)
def optimize_INT_IS_ZERO(self, op):
- self._optimize_nullness(op, op.getarg(0), False)
+ return self._optimize_nullness(op, op.getarg(0), False)
def _optimize_oois_ooisnot(self, op, expect_isnot, instance):
arg0 = self.get_box_replacement(op.getarg(0))
@@ -547,9 +589,9 @@
elif info1 and info1.is_virtual():
self.make_constant_int(op, expect_isnot)
elif info1 and info1.is_null():
- self._optimize_nullness(op, op.getarg(0), expect_isnot)
+ return self._optimize_nullness(op, op.getarg(0), expect_isnot)
elif info0 and info0.is_null():
- self._optimize_nullness(op, op.getarg(1), expect_isnot)
+ return self._optimize_nullness(op, op.getarg(1), expect_isnot)
elif arg0 is arg1:
self.make_constant_int(op, not expect_isnot)
else:
@@ -568,19 +610,19 @@
# class is different
self.make_constant_int(op, expect_isnot)
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_PTR_EQ(self, op):
- self._optimize_oois_ooisnot(op, False, False)
+ return self._optimize_oois_ooisnot(op, False, False)
def optimize_PTR_NE(self, op):
- self._optimize_oois_ooisnot(op, True, False)
+ return self._optimize_oois_ooisnot(op, True, False)
def optimize_INSTANCE_PTR_EQ(self, op):
- self._optimize_oois_ooisnot(op, False, True)
+ return self._optimize_oois_ooisnot(op, False, True)
def optimize_INSTANCE_PTR_NE(self, op):
- self._optimize_oois_ooisnot(op, True, True)
+ return self._optimize_oois_ooisnot(op, True, True)
def optimize_CALL_N(self, op):
# dispatch based on 'oopspecindex' to a method that handles
@@ -589,14 +631,13 @@
effectinfo = op.getdescr().get_extra_info()
oopspecindex = effectinfo.oopspecindex
if oopspecindex == EffectInfo.OS_ARRAYCOPY:
- if self._optimize_CALL_ARRAYCOPY(op):
- return
- self.emit_operation(op)
+ return self._optimize_CALL_ARRAYCOPY(op)
+ return self.emit(op)
def _optimize_CALL_ARRAYCOPY(self, op):
length = self.get_constant_box(op.getarg(5))
if length and length.getint() == 0:
- return True # 0-length arraycopy
+ return None # 0-length arraycopy
source_info = self.getptrinfo(op.getarg(1))
dest_info = self.getptrinfo(op.getarg(2))
@@ -612,7 +653,7 @@
dest_start = dest_start_box.getint()
arraydescr = extrainfo.single_write_descr_array
if arraydescr.is_array_of_structs():
- return False # not supported right now
+ return self.emit(op) # not supported right now
# XXX fish fish fish
for index in range(length.getint()):
@@ -638,9 +679,9 @@
ConstInt(index + dest_start),
val],
descr=arraydescr)
- self.emit_operation(newop)
- return True
- return False
+ self.optimizer.send_extra_operation(newop)
+ return None
+ return self.emit(op)
def optimize_CALL_PURE_I(self, op):
# this removes a CALL_PURE with all constant arguments.
@@ -650,6 +691,7 @@
self.make_constant(op, result)
self.last_emitted_operation = REMOVED
return
+
# dispatch based on 'oopspecindex' to a method that handles
# specifically the given oopspec call.
effectinfo = op.getdescr().get_extra_info()
@@ -663,7 +705,7 @@
elif oopspecindex == EffectInfo.OS_INT_PY_MOD:
if self._optimize_CALL_INT_PY_MOD(op):
return
- self.emit_operation(op)
+ return self.emit(op)
optimize_CALL_PURE_R = optimize_CALL_PURE_I
optimize_CALL_PURE_F = optimize_CALL_PURE_I
optimize_CALL_PURE_N = optimize_CALL_PURE_I
@@ -673,7 +715,7 @@
# it was a CALL_PURE or a CALL_LOOPINVARIANT that was killed;
# so we also kill the following GUARD_NO_EXCEPTION
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_GUARD_FUTURE_CONDITION(self, op):
self.optimizer.notice_guard_future_condition(op)
@@ -758,11 +800,11 @@
def optimize_CAST_PTR_TO_INT(self, op):
self.optimizer.pure_reverse(op)
- self.emit_operation(op)
+ return self.emit(op)
def optimize_CAST_INT_TO_PTR(self, op):
self.optimizer.pure_reverse(op)
- self.emit_operation(op)
+ return self.emit(op)
def optimize_SAME_AS_I(self, op):
self.make_equal_to(op, op.getarg(0))
@@ -770,5 +812,6 @@
optimize_SAME_AS_F = optimize_SAME_AS_I
dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_',
- default=OptRewrite.emit_operation)
+ default=OptRewrite.emit)
optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD')
+dispatch_postprocess = make_dispatcher_method(OptRewrite, 'postprocess_')
diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py
--- a/rpython/jit/metainterp/optimizeopt/simplify.py
+++ b/rpython/jit/metainterp/optimizeopt/simplify.py
@@ -8,16 +8,16 @@
self.last_label_descr = None
self.unroll = unroll
- def emit_operation(self, op):
+ def emit(self, op):
if op.is_guard():
if self.optimizer.pendingfields is None:
self.optimizer.pendingfields = []
- Optimization.emit_operation(self, op)
+ return Optimization.emit(self, op)
def optimize_CALL_PURE_I(self, op):
opnum = OpHelpers.call_for_descr(op.getdescr())
newop = self.optimizer.replace_op_with(op, opnum)
- self.emit_operation(newop)
+ return self.emit(newop)
optimize_CALL_PURE_R = optimize_CALL_PURE_I
optimize_CALL_PURE_F = optimize_CALL_PURE_I
optimize_CALL_PURE_N = optimize_CALL_PURE_I
@@ -25,7 +25,7 @@
def optimize_CALL_LOOPINVARIANT_I(self, op):
opnum = OpHelpers.call_for_descr(op.getdescr())
op = op.copy_and_change(opnum)
- self.emit_operation(op)
+ return self.emit(op)
optimize_CALL_LOOPINVARIANT_R = optimize_CALL_LOOPINVARIANT_I
optimize_CALL_LOOPINVARIANT_F = optimize_CALL_LOOPINVARIANT_I
optimize_CALL_LOOPINVARIANT_N = optimize_CALL_LOOPINVARIANT_I
@@ -35,7 +35,7 @@
def optimize_VIRTUAL_REF(self, op):
newop = self.replace_op_with(op, rop.SAME_AS_R, [op.getarg(0)])
- self.emit_operation(newop)
+ return self.emit(newop)
def optimize_QUASIIMMUT_FIELD(self, op):
# xxx ideally we could also kill the following GUARD_NOT_INVALIDATED
@@ -51,7 +51,7 @@
# if isinstance(descr, JitCellToken):
# return self.optimize_JUMP(op.copy_and_change(rop.JUMP))
# self.last_label_descr = op.getdescr()
- # self.emit_operation(op)
+ # return self.emit(op)
# def optimize_JUMP(self, op):
# if not self.unroll:
@@ -67,11 +67,11 @@
# else:
# assert len(descr.target_tokens) == 1
# op.setdescr(descr.target_tokens[0])
- # self.emit_operation(op)
+ # return self.emit(op)
def optimize_GUARD_FUTURE_CONDITION(self, op):
self.optimizer.notice_guard_future_condition(op)
dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_',
- default=OptSimplify.emit_operation)
+ default=OptSimplify.emit)
OptSimplify.propagate_forward = dispatch_opt
diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py
--- a/rpython/jit/metainterp/optimizeopt/virtualize.py
+++ b/rpython/jit/metainterp/optimizeopt/virtualize.py
@@ -13,6 +13,7 @@
"Virtualize objects until they escape."
_last_guard_not_forced_2 = None
+ _finish_guard_op = None
def make_virtual(self, known_class, source_op, descr):
opinfo = info.InstancePtrInfo(descr, known_class, is_virtual=True)
@@ -62,26 +63,27 @@
def optimize_GUARD_NO_EXCEPTION(self, op):
if self.last_emitted_operation is REMOVED:
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_GUARD_NOT_FORCED(self, op):
if self.last_emitted_operation is REMOVED:
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_GUARD_NOT_FORCED_2(self, op):
self._last_guard_not_forced_2 = op
def optimize_FINISH(self, op):
- if self._last_guard_not_forced_2 is not None:
- guard_op = self._last_guard_not_forced_2
- self.emit_operation(op)
+ self._finish_guard_op = self._last_guard_not_forced_2
+ return self.emit(op)
+
+ def postprocess_FINISH(self, op):
+ guard_op = self._finish_guard_op
+ if guard_op is not None:
guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, [])
i = len(self.optimizer._newoperations) - 1
assert i >= 0
self.optimizer._newoperations.insert(i, guard_op)
- else:
- self.emit_operation(op)
def optimize_CALL_MAY_FORCE_I(self, op):
effectinfo = op.getdescr().get_extra_info()
@@ -89,7 +91,7 @@
if oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL:
if self._optimize_JIT_FORCE_VIRTUAL(op):
return
- self.emit_operation(op)
+ return self.emit(op)
optimize_CALL_MAY_FORCE_R = optimize_CALL_MAY_FORCE_I
optimize_CALL_MAY_FORCE_F = optimize_CALL_MAY_FORCE_I
optimize_CALL_MAY_FORCE_N = optimize_CALL_MAY_FORCE_I
@@ -101,7 +103,7 @@
opinfo = self.getptrinfo(op.getarg(2))
if opinfo and opinfo.is_virtual():
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_VIRTUAL_REF(self, op):
# get some constants
@@ -119,10 +121,10 @@
op.set_forwarded(newop)
newop.set_forwarded(vrefvalue)
token = ResOperation(rop.FORCE_TOKEN, [])
- self.emit_operation(token)
vrefvalue.setfield(descr_virtual_token, newop, token)
vrefvalue.setfield(descr_forced, newop,
self.optimizer.cpu.ts.CONST_NULLREF)
+ return self.emit(token)
def optimize_VIRTUAL_REF_FINISH(self, op):
# This operation is used in two cases. In normal cases, it
@@ -185,7 +187,7 @@
self.make_equal_to(op, fieldop)
else:
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ return self.emit(op)
optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I
optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I
@@ -197,7 +199,7 @@
self.get_box_replacement(op.getarg(1)))
else:
self.make_nonnull(struct)
- self.emit_operation(op)
+ return self.emit(op)
def optimize_NEW_WITH_VTABLE(self, op):
known_class = ConstInt(op.getdescr().get_vtable())
@@ -211,36 +213,35 @@
if sizebox is not None:
self.make_varray(op.getdescr(), sizebox.getint(), op)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_NEW_ARRAY_CLEAR(self, op):
sizebox = self.get_constant_box(op.getarg(0))
if sizebox is not None:
self.make_varray(op.getdescr(), sizebox.getint(), op, clear=True)
else:
- self.emit_operation(op)
+ return self.emit(op)
def optimize_CALL_N(self, op):
effectinfo = op.getdescr().get_extra_info()
if effectinfo.oopspecindex == EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR:
- self.do_RAW_MALLOC_VARSIZE_CHAR(op)
+ return self.do_RAW_MALLOC_VARSIZE_CHAR(op)
elif effectinfo.oopspecindex == EffectInfo.OS_RAW_FREE:
- self.do_RAW_FREE(op)
+ return self.do_RAW_FREE(op)
elif effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE:
# we might end up having CALL here instead of COND_CALL
info = self.getptrinfo(op.getarg(1))
if info and info.is_virtual():
return
else:
- self.emit_operation(op)
+ return self.emit(op)
optimize_CALL_R = optimize_CALL_N
optimize_CALL_I = optimize_CALL_N
def do_RAW_MALLOC_VARSIZE_CHAR(self, op):
sizebox = self.get_constant_box(op.getarg(1))
if sizebox is None:
- self.emit_operation(op)
- return
+ return self.emit(op)
self.make_virtual_raw_memory(sizebox.getint(), op)
self.last_emitted_operation = REMOVED
@@ -248,7 +249,7 @@
opinfo = self.getrawptrinfo(op.getarg(1))
if opinfo and opinfo.is_virtual():
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_INT_ADD(self, op):
opinfo = self.getrawptrinfo(op.getarg(0), create=False)
@@ -261,7 +262,7 @@
isinstance(opinfo, info.RawSlicePtrInfo)):
self.make_virtual_raw_slice(offset, opinfo, op)
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_ARRAYLEN_GC(self, op):
opinfo = self.getptrinfo(op.getarg(0))
@@ -269,7 +270,7 @@
self.make_constant_int(op, opinfo.getlength())
else:
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ return self.emit(op)
def optimize_GETARRAYITEM_GC_I(self, op):
opinfo = self.getptrinfo(op.getarg(0))
@@ -283,7 +284,7 @@
self.make_equal_to(op, item)
return
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ return self.emit(op)
optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I
optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I
@@ -303,7 +304,7 @@
self.get_box_replacement(op.getarg(2)))
return
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ return self.emit(op)
def _unpack_arrayitem_raw_op(self, op, indexbox):
index = indexbox.getint()
@@ -328,7 +329,7 @@
self.make_equal_to(op, itemvalue)
return
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ return self.emit(op)
optimize_GETARRAYITEM_RAW_F = optimize_GETARRAYITEM_RAW_I
def optimize_SETARRAYITEM_RAW(self, op):
@@ -344,7 +345,7 @@
except InvalidRawOperation:
pass
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ return self.emit(op)
def _unpack_raw_load_store_op(self, op, offsetbox):
offset = offsetbox.getint()
@@ -366,7 +367,7 @@
else:
self.make_equal_to(op, itemop)
return
- self.emit_operation(op)
+ return self.emit(op)
optimize_RAW_LOAD_F = optimize_RAW_LOAD_I
def optimize_RAW_STORE(self, op):
@@ -380,7 +381,7 @@
return
except InvalidRawOperation:
pass
- self.emit_operation(op)
+ return self.emit(op)
def optimize_GETINTERIORFIELD_GC_I(self, op):
opinfo = self.getptrinfo(op.getarg(0))
@@ -396,7 +397,7 @@
self.make_equal_to(op, fld)
return
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ return self.emit(op)
optimize_GETINTERIORFIELD_GC_R = optimize_GETINTERIORFIELD_GC_I
optimize_GETINTERIORFIELD_GC_F = optimize_GETINTERIORFIELD_GC_I
@@ -410,10 +411,12 @@
self.get_box_replacement(op.getarg(2)))
return
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ return self.emit(op)
dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_',
- default=OptVirtualize.emit_operation)
+ default=OptVirtualize.emit)
OptVirtualize.propagate_forward = dispatch_opt
+dispatch_postprocess = make_dispatcher_method(OptVirtualize, 'postprocess_')
+OptVirtualize.propagate_postprocess = dispatch_postprocess
diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py
--- a/rpython/jit/metainterp/optimizeopt/vstring.py
+++ b/rpython/jit/metainterp/optimizeopt/vstring.py
@@ -97,7 +97,7 @@
newop = ResOperation(self.mode.NEWSTR, [lengthbox])
if not we_are_translated():
newop.name = 'FORCE'
- optforce.emit_operation(newop)
+ optforce.emit_extra(newop)
newop = optforce.getlastop()
newop.set_forwarded(self)
op = optforce.get_box_replacement(op)
@@ -120,7 +120,7 @@
lengthop = ResOperation(mode.STRLEN, [op])
lengthop.set_forwarded(self.getlenbound(mode))
self.lgtop = lengthop
- string_optimizer.emit_operation(lengthop)
+ string_optimizer.emit_extra(lengthop)
return lengthop
def make_guards(self, op, short, optimizer):
@@ -204,7 +204,7 @@
op = ResOperation(mode.STRSETITEM, [targetbox,
offsetbox,
charbox])
- string_optimizer.emit_operation(op)
+ string_optimizer.emit_extra(op)
offsetbox = _int_add(string_optimizer, offsetbox, CONST_1)
return offsetbox
@@ -356,7 +356,7 @@
mode)
srcoffsetbox = _int_add(string_optimizer, srcoffsetbox, CONST_1)
assert not isinstance(targetbox, Const)# ConstPtr never makes sense
- string_optimizer.emit_operation(ResOperation(mode.STRSETITEM,
+ string_optimizer.emit_extra(ResOperation(mode.STRSETITEM,
[targetbox, offsetbox, charbox]))
offsetbox = _int_add(string_optimizer, offsetbox, CONST_1)
else:
@@ -368,7 +368,7 @@
op = ResOperation(mode.COPYSTRCONTENT, [srcbox, targetbox,
srcoffsetbox, offsetbox,
lengthbox])
- string_optimizer.emit_operation(op)
+ string_optimizer.emit_extra(op)
offsetbox = nextoffsetbox
return offsetbox
@@ -412,7 +412,7 @@
else:
resbox = string_optimizer.replace_op_with(resbox, mode.STRGETITEM,
[strbox, indexbox])
- string_optimizer.emit_operation(resbox)
+ string_optimizer.emit_extra(resbox)
return resbox
@@ -422,6 +422,12 @@
def setup(self):
self.optimizer.optstring = self
+ def propagate_forward(self, op):
+ return dispatch_opt(self, op)
+
+ def propagate_postprocess(self, op):
+ return dispatch_postprocess(self, op)
+
def make_vstring_plain(self, op, mode, length):
vvalue = VStringPlainInfo(mode, True, length)
op = self.replace_op_with(op, op.getopnum())
@@ -441,9 +447,9 @@
return vvalue
def optimize_NEWSTR(self, op):
- self._optimize_NEWSTR(op, mode_string)
+ return self._optimize_NEWSTR(op, mode_string)
def optimize_NEWUNICODE(self, op):
- self._optimize_NEWSTR(op, mode_unicode)
+ return self._optimize_NEWSTR(op, mode_unicode)
def _optimize_NEWSTR(self, op, mode):
length_box = self.get_constant_box(op.getarg(0))
@@ -452,8 +458,13 @@
self.make_vstring_plain(op, mode, length_box.getint())
else:
self.make_nonnull_str(op, mode)
- self.emit_operation(op)
- self.pure_from_args(mode.STRLEN, [op], op.getarg(0))
+ return self.emit(op)
+
+ def postprocess_NEWSTR(self, op):
+ self.pure_from_args(mode_string.STRLEN, [op], op.getarg(0))
+
+ def postprocess_NEWUNICODE(self, op):
+ self.pure_from_args(mode_unicode.STRLEN, [op], op.getarg(0))
def optimize_STRSETITEM(self, op):
opinfo = self.getptrinfo(op.getarg(0))
@@ -464,17 +475,17 @@
indexbox = self.get_constant_box(op.getarg(1))
if indexbox is not None:
opinfo.strsetitem(indexbox.getint(),
- self.get_box_replacement(op.getarg(2)))
+ self.get_box_replacement(op.getarg(2)))
return
self.make_nonnull(op.getarg(0))
- self.emit_operation(op)
+ return self.emit(op)
optimize_UNICODESETITEM = optimize_STRSETITEM
def optimize_STRGETITEM(self, op):
- self._optimize_STRGETITEM(op, mode_string)
+ return self._optimize_STRGETITEM(op, mode_string)
def optimize_UNICODEGETITEM(self, op):
- self._optimize_STRGETITEM(op, mode_unicode)
+ return self._optimize_STRGETITEM(op, mode_unicode)
def _optimize_STRGETITEM(self, op, mode):
self.strgetitem(op, op.getarg(0), op.getarg(1), mode)
@@ -514,9 +525,9 @@
return _strgetitem(self, s, index, mode, op)
def optimize_STRLEN(self, op):
- self._optimize_STRLEN(op, mode_string)
+ return self._optimize_STRLEN(op, mode_string)
def optimize_UNICODELEN(self, op):
- self._optimize_STRLEN(op, mode_unicode)
+ return self._optimize_STRLEN(op, mode_unicode)
def _optimize_STRLEN(self, op, mode):
opinfo = self.getptrinfo(op.getarg(0))
@@ -525,13 +536,13 @@
if lgtop is not None:
self.make_equal_to(op, lgtop)
return
- self.emit_operation(op)
+ return self.emit(op)
def optimize_COPYSTRCONTENT(self, op):
- self._optimize_COPYSTRCONTENT(op, mode_string)
+ return self._optimize_COPYSTRCONTENT(op, mode_string)
def optimize_COPYUNICODECONTENT(self, op):
- self._optimize_COPYSTRCONTENT(op, mode_unicode)
+ return self._optimize_COPYSTRCONTENT(op, mode_unicode)
def _optimize_COPYSTRCONTENT(self, op, mode):
# args: src dst srcstart dststart length
@@ -566,7 +577,7 @@
op.getarg(1), ConstInt(index + dst_start),
vresult,
])
- self.emit_operation(new_op)
+ self.emit_extra(new_op)
else:
copy_str_content(self, op.getarg(0), op.getarg(1), op.getarg(2),
op.getarg(3), op.getarg(4), mode,
@@ -581,13 +592,15 @@
if oopspecindex != EffectInfo.OS_NONE:
for value, meth in opt_call_oopspec_ops:
if oopspecindex == value: # a match with the OS_STR_xxx
- if meth(self, op, mode_string):
- return
+ handled, newop = meth(self, op, mode_string)
+ if handled:
+ return newop
break
if oopspecindex == value + EffectInfo._OS_offset_uni:
# a match with the OS_UNI_xxx
- if meth(self, op, mode_unicode):
- return
+ handled, newop = meth(self, op, mode_unicode)
+ if handled:
+ return newop
break
if oopspecindex == EffectInfo.OS_STR2UNICODE:
if self.opt_call_str_STR2UNICODE(op):
@@ -595,7 +608,7 @@
if oopspecindex == EffectInfo.OS_SHRINK_ARRAY:
if self.opt_call_SHRINK_ARRAY(op):
return
- self.emit_operation(op)
+ return self.emit(op)
optimize_CALL_R = optimize_CALL_I
optimize_CALL_F = optimize_CALL_I
optimize_CALL_N = optimize_CALL_I
@@ -607,7 +620,7 @@
def optimize_GUARD_NO_EXCEPTION(self, op):
if self.last_emitted_operation is REMOVED:
return
- self.emit_operation(op)
+ return self.emit(op)
def opt_call_str_STR2UNICODE(self, op):
# Constant-fold unicode("constant string").
@@ -635,7 +648,7 @@
self.get_box_replacement(op.getarg(1)),
self.get_box_replacement(op.getarg(2)))
self.last_emitted_operation = REMOVED
- return True
+ return True, None
def opt_call_stroruni_STR_SLICE(self, op, mode):
self.make_nonnull_str(op.getarg(1), mode)
@@ -651,7 +664,7 @@
# value = self.make_vstring_plain(op, mode, -1)
# value.setup_slice(vstr._chars, vstart.getint(),
# vstop.getint())
- # return True
+ # return True, None
#
startbox = op.getarg(2)
strbox = op.getarg(1)
@@ -664,7 +677,7 @@
#
self.make_vstring_slice(op, strbox, startbox, mode, lengthbox)
self.last_emitted_operation = REMOVED
- return True
+ return True, None
@specialize.arg(2)
def opt_call_stroruni_STR_EQUAL(self, op, mode):
@@ -687,25 +700,28 @@
l1box.value != l2box.value):
# statically known to have a different length
self.make_constant(op, CONST_0)
- return True
+ return True, None
#
- if self.handle_str_equal_level1(arg1, arg2, op, mode):
- return True
- if self.handle_str_equal_level1(arg2, arg1, op, mode):
- return True
- if self.handle_str_equal_level2(arg1, arg2, op, mode):
- return True
- if self.handle_str_equal_level2(arg2, arg1, op, mode):
- return True
+ handled, result = self.handle_str_equal_level1(arg1, arg2, op, mode)
+ if handled:
+ return True, result
+ handled, result = self.handle_str_equal_level1(arg2, arg1, op, mode)
+ if handled:
+ return True, result
+ handled, result = self.handle_str_equal_level2(arg1, arg2, op, mode)
+ if handled:
+ return True, result
+ handled, result = self.handle_str_equal_level2(arg2, arg1, op, mode)
+ if handled:
+ return True, result
#
if i1 and i1.is_nonnull() and i2 and i2.is_nonnull():
if l1box is not None and l2box is not None and l1box.same_box(l2box):
do = EffectInfo.OS_STREQ_LENGTHOK
else:
do = EffectInfo.OS_STREQ_NONNULL
- self.generate_modified_call(do, [arg1, arg2], op, mode)
- return True
- return False
+ return True, self.generate_modified_call(do, [arg1, arg2], op, mode)
+ return False, None
def handle_str_equal_level1(self, arg1, arg2, resultop, mode):
i1 = self.getptrinfo(arg1)
@@ -728,7 +744,7 @@
[lengthbox, CONST_0],
descr=DONT_CHANGE)
seo(op)
- return True
+ return True, None
if l2box.value == 1:
if i1:
l1box = i1.getstrlen(arg1, self, mode, False)
@@ -742,30 +758,28 @@
op = self.optimizer.replace_op_with(resultop, rop.INT_EQ,
[vchar1, vchar2], descr=DONT_CHANGE)
seo(op)
- return True
+ return True, None
if isinstance(i1, VStringSliceInfo):
vchar = self.strgetitem(None, arg2, optimizer.CONST_0,
mode)
do = EffectInfo.OS_STREQ_SLICE_CHAR
- self.generate_modified_call(do, [i1.s, i1.start,
- i1.lgtop, vchar],
- resultop, mode)
- return True
+ return True, self.generate_modified_call(do, [i1.s, i1.start,
+ i1.lgtop, vchar],
+ resultop, mode)
#
if i2 and i2.is_null():
if i1 and i1.is_nonnull():
self.make_constant(resultop, CONST_0)
- return True
+ return True, None
if i1 and i1.is_null():
self.make_constant(resultop, CONST_1)
- return True
+ return True, None
op = self.optimizer.replace_op_with(resultop, rop.PTR_EQ,
[arg1, llhelper.CONST_NULL],
More information about the pypy-commit
mailing list