[pypy-svn] r77107 - in pypy/branch/resoperation-refactoring/pypy/jit: backend/llgraph metainterp metainterp/optimizeopt metainterp/test
david at codespeak.net
david at codespeak.net
Thu Sep 16 13:52:42 CEST 2010
Author: david
Date: Thu Sep 16 13:52:39 2010
New Revision: 77107
Modified:
pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py
pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py
Log:
(david, antocuni) Initial interface changes for resoperation
Modified: pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/backend/llgraph/runner.py Thu Sep 16 13:52:39 2010
@@ -160,7 +160,8 @@
if self.is_oo and isinstance(descr, (OODescr, MethDescr)):
# hack hack, not rpython
c._obj.externalobj.operations[-1].descr = descr
- for x in op.args:
+ for i in range(op.numargs()):
+ x = op.getarg(i)
if isinstance(x, history.Box):
llimpl.compile_add_var(c, var2index[x])
elif isinstance(x, history.ConstInt):
@@ -280,7 +281,7 @@
def __init__(self, *args, **kwds):
BaseCPU.__init__(self, *args, **kwds)
self.fielddescrof_vtable = self.fielddescrof(rclass.OBJECT, 'typeptr')
-
+
def fielddescrof(self, S, fieldname):
ofs, size = symbolic.get_field_token(S, fieldname)
token = history.getkind(getattr(S, fieldname))
@@ -504,7 +505,7 @@
return ootype.cast_to_object(e)
else:
return ootype.NULL
-
+
def get_exc_value(self):
if llimpl._last_exception:
earg = llimpl._last_exception.args[1]
@@ -580,7 +581,7 @@
x = descr.callmeth(selfbox, argboxes)
# XXX: return None if METH.RESULT is Void
return x
-
+
def make_getargs(ARGS):
argsiter = unrolling_iterable(ARGS)
@@ -612,7 +613,7 @@
class KeyManager(object):
"""
Helper class to convert arbitrary dictionary keys to integers.
- """
+ """
def __init__(self):
self.keys = {}
@@ -695,7 +696,7 @@
self.ARRAY = ARRAY = ootype.Array(TYPE)
def create():
return boxresult(TYPE, ootype.new(TYPE))
-
+
def create_array(lengthbox):
n = lengthbox.getint()
return boxresult(ARRAY, ootype.oonewarray(ARRAY, n))
@@ -757,7 +758,7 @@
obj = objbox.getref(TYPE)
value = unwrap(T, valuebox)
setattr(obj, fieldname, value)
-
+
self.getfield = getfield
self.setfield = setfield
self._is_pointer_field = (history.getkind(T) == 'ref')
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/compile.py Thu Sep 16 13:52:39 2010
@@ -51,7 +51,7 @@
def compile_new_loop(metainterp, old_loop_tokens, greenkey, start):
"""Try to compile a new loop by closing the current history back
to the first operation.
- """
+ """
history = metainterp.history
loop = create_empty_loop(metainterp)
loop.greenkey = greenkey
@@ -133,7 +133,7 @@
metainterp_sd.profiler.end_backend()
if not we_are_translated():
metainterp_sd.stats.compiled()
- metainterp_sd.log("compiled new bridge")
+ metainterp_sd.log("compiled new bridge")
# ____________________________________________________________
@@ -177,7 +177,7 @@
class TerminatingLoopToken(LoopToken):
terminating = True
-
+
def __init__(self, nargs, finishdescr):
self.specnodes = [prebuiltNotSpecNode]*nargs
self.finishdescr = finishdescr
@@ -508,7 +508,7 @@
def compile_new_bridge(metainterp, old_loop_tokens, resumekey):
"""Try to compile a new bridge leading from the beginning of the history
to some existing place.
- """
+ """
# The history contains new operations to attach as the code for the
# failure of 'resumekey.guard_op'.
#
@@ -546,7 +546,8 @@
# e.g. loop_tokens_done_with_this_frame_void[0]
# Replace the operation with the real operation we want, i.e. a FINISH
descr = target_loop_token.finishdescr
- new_op = ResOperation(rop.FINISH, op.args, None, descr=descr)
+ args = [op.getarg(i) for i in range(op.numargs())]
+ new_op = ResOperation(rop.FINISH, args, None, descr=descr)
new_loop.operations[-1] = new_op
# ____________________________________________________________
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/history.py Thu Sep 16 13:52:39 2010
@@ -532,7 +532,7 @@
class BoxFloat(Box):
type = FLOAT
_attrs_ = ('value',)
-
+
def __init__(self, floatval=0.0):
assert isinstance(floatval, float)
self.value = floatval
@@ -759,12 +759,13 @@
assert len(seen) == len(inputargs), (
"duplicate Box in the Loop.inputargs")
TreeLoop.check_consistency_of_branch(operations, seen)
-
+
@staticmethod
def check_consistency_of_branch(operations, seen):
"NOT_RPYTHON"
for op in operations:
- for box in op.args:
+ for i in range(op.numargs()):
+ box = op.getarg(i)
if isinstance(box, Box):
assert box in seen
if op.is_guard():
@@ -885,7 +886,7 @@
self.aborted_count += 1
def entered(self):
- self.enter_count += 1
+ self.enter_count += 1
def compiled(self):
self.compiled_count += 1
@@ -898,7 +899,7 @@
def add_new_loop(self, loop):
self.loops.append(loop)
-
+
# test read interface
def get_all_loops(self):
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/logger.py Thu Sep 16 13:52:39 2010
@@ -80,10 +80,10 @@
for i in range(len(operations)):
op = operations[i]
if op.opnum == rop.DEBUG_MERGE_POINT:
- loc = op.args[0]._get_str()
+ loc = op.getarg(0)._get_str()
debug_print("debug_merge_point('%s')" % (loc,))
continue
- args = ", ".join([self.repr_of_arg(memo, arg) for arg in op.args])
+ args = ", ".join([self.repr_of_arg(memo, op.getarg(i)) for i in range(op.numargs())])
if op.result is not None:
res = self.repr_of_arg(memo, op.result) + " = "
else:
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizefindnode.py Thu Sep 16 13:52:39 2010
@@ -154,7 +154,8 @@
def find_nodes_default(self, op):
if op.is_always_pure():
- for arg in op.args:
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if self.get_constant_box(arg) is None:
break
else:
@@ -164,8 +165,8 @@
op.opnum, argboxes, op.descr)
self.set_constant_node(op.result, resbox.constbox())
# default case: mark the arguments as escaping
- for box in op.args:
- self.getnode(box).mark_escaped()
+ for i in range(op.numargs()):
+ self.getnode(op.getarg(i)).mark_escaped()
def find_nodes_no_escape(self, op):
pass # for operations that don't escape their arguments
@@ -178,7 +179,7 @@
def find_nodes_NEW_WITH_VTABLE(self, op):
instnode = InstanceNode()
- box = op.args[0]
+ box = op.getarg(0)
assert isinstance(box, Const)
instnode.knownclsbox = box
self.nodes[op.result] = instnode
@@ -189,7 +190,7 @@
self.nodes[op.result] = instnode
def find_nodes_NEW_ARRAY(self, op):
- lengthbox = op.args[0]
+ lengthbox = op.getarg(0)
lengthbox = self.get_constant_box(lengthbox)
if lengthbox is None:
return # var-sized arrays are not virtual
@@ -199,28 +200,28 @@
self.nodes[op.result] = arraynode
def find_nodes_ARRAYLEN_GC(self, op):
- arraynode = self.getnode(op.args[0])
+ arraynode = self.getnode(op.getarg(0))
if arraynode.arraydescr is not None:
resbox = ConstInt(arraynode.arraysize)
self.set_constant_node(op.result, resbox)
def find_nodes_GUARD_CLASS(self, op):
- instnode = self.getnode(op.args[0])
+ instnode = self.getnode(op.getarg(0))
if instnode.fromstart: # only useful (and safe) in this case
- box = op.args[1]
+ box = op.getarg(1)
assert isinstance(box, Const)
instnode.knownclsbox = box
def find_nodes_GUARD_VALUE(self, op):
- instnode = self.getnode(op.args[0])
+ instnode = self.getnode(op.getarg(0))
if instnode.fromstart: # only useful (and safe) in this case
- box = op.args[1]
+ box = op.getarg(1)
assert isinstance(box, Const)
instnode.knownvaluebox = box
def find_nodes_SETFIELD_GC(self, op):
- instnode = self.getnode(op.args[0])
- fieldnode = self.getnode(op.args[1])
+ instnode = self.getnode(op.getarg(0))
+ fieldnode = self.getnode(op.getarg(1))
if instnode.escaped:
fieldnode.mark_escaped()
return # nothing to be gained from tracking the field
@@ -232,7 +233,7 @@
instnode.add_escape_dependency(fieldnode)
def find_nodes_GETFIELD_GC(self, op):
- instnode = self.getnode(op.args[0])
+ instnode = self.getnode(op.getarg(0))
if instnode.escaped:
return # nothing to be gained from tracking the field
field = op.descr
@@ -254,13 +255,13 @@
find_nodes_GETFIELD_GC_PURE = find_nodes_GETFIELD_GC
def find_nodes_SETARRAYITEM_GC(self, op):
- indexbox = op.args[1]
+ indexbox = op.getarg(1)
indexbox = self.get_constant_box(indexbox)
if indexbox is None:
self.find_nodes_default(op) # not a Const index
return
- arraynode = self.getnode(op.args[0])
- itemnode = self.getnode(op.args[2])
+ arraynode = self.getnode(op.getarg(0))
+ itemnode = self.getnode(op.getarg(2))
if arraynode.escaped:
itemnode.mark_escaped()
return # nothing to be gained from tracking the item
@@ -270,12 +271,12 @@
arraynode.add_escape_dependency(itemnode)
def find_nodes_GETARRAYITEM_GC(self, op):
- indexbox = op.args[1]
+ indexbox = op.getarg(1)
indexbox = self.get_constant_box(indexbox)
if indexbox is None:
self.find_nodes_default(op) # not a Const index
return
- arraynode = self.getnode(op.args[0])
+ arraynode = self.getnode(op.getarg(0))
if arraynode.escaped:
return # nothing to be gained from tracking the item
index = indexbox.getint()
@@ -298,13 +299,15 @@
def find_nodes_JUMP(self, op):
# only set up the 'unique' field of the InstanceNodes;
# real handling comes later (build_result_specnodes() for loops).
- for box in op.args:
+ for i in range(op.numargs()):
+ box = op.getarg(i)
self.getnode(box).set_unique_nodes()
def find_nodes_FINISH(self, op):
# only for bridges, and only for the ones that end in a 'return'
# or 'raise'; all other cases end with a JUMP.
- for box in op.args:
+ for i in range(op.numargs()):
+ box = op.getarg(i)
self.getnode(box).unique = UNIQUE_NO
find_nodes_ops = _findall(NodeFinder, 'find_nodes_')
@@ -344,13 +347,13 @@
# computed by NodeFinder.find_nodes().
op = loop.operations[-1]
assert op.opnum == rop.JUMP
- assert len(self.inputnodes) == len(op.args)
+ assert len(self.inputnodes) == op.numargs()
while True:
self.restart_needed = False
specnodes = []
- for i in range(len(op.args)):
+ for i in range(op.numargs()):
inputnode = self.inputnodes[i]
- exitnode = self.getnode(op.args[i])
+ exitnode = self.getnode(op.getarg(i))
specnodes.append(self.intersect(inputnode, exitnode))
if not self.restart_needed:
break
@@ -562,9 +565,9 @@
def bridge_matches(self, nextloop_specnodes):
jump_op = self.jump_op
- assert len(jump_op.args) == len(nextloop_specnodes)
+ assert jump_op.numargs() == len(nextloop_specnodes)
for i in range(len(nextloop_specnodes)):
- exitnode = self.getnode(jump_op.args[i])
+ exitnode = self.getnode(jump_op.getarg(i))
if not nextloop_specnodes[i].matches_instance_node(exitnode):
return False
return True
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/intbounds.py Thu Sep 16 13:52:39 2010
@@ -25,7 +25,7 @@
b = v.intbound
if b.has_lower and b.has_upper and b.lower == b.upper:
v.make_constant(ConstInt(b.lower))
-
+
try:
op = self.optimizer.producer[box]
except KeyError:
@@ -35,19 +35,19 @@
if opnum == value:
func(self, op)
break
-
+
def optimize_GUARD_TRUE(self, op):
self.emit_operation(op)
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
optimize_GUARD_FALSE = optimize_GUARD_TRUE
optimize_GUARD_VALUE = optimize_GUARD_TRUE
def optimize_INT_AND(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
-
+
r = self.getvalue(op.result)
if v2.is_constant():
val = v2.box.getint()
@@ -57,31 +57,31 @@
val = v1.box.getint()
if val >= 0:
r.intbound.intersect(IntBound(0,val))
-
+
def optimize_INT_SUB(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(v1.intbound.sub_bound(v2.intbound))
-
+
def optimize_INT_ADD(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(v1.intbound.add_bound(v2.intbound))
def optimize_INT_MUL(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(v1.intbound.mul_bound(v2.intbound))
def optimize_INT_ADD_OVF(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
resbound = v1.intbound.add_bound(v2.intbound)
if resbound.has_lower and resbound.has_upper and \
self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
@@ -93,10 +93,10 @@
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(resbound)
-
+
def optimize_INT_SUB_OVF(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
resbound = v1.intbound.sub_bound(v2.intbound)
if resbound.has_lower and resbound.has_upper and \
self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
@@ -110,8 +110,8 @@
r.intbound.intersect(resbound)
def optimize_INT_MUL_OVF(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
resbound = v1.intbound.mul_bound(v2.intbound)
if resbound.has_lower and resbound.has_upper and \
self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
@@ -123,10 +123,10 @@
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(resbound)
-
+
def optimize_INT_LT(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_lt(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_ge(v2.intbound):
@@ -135,8 +135,8 @@
self.emit_operation(op)
def optimize_INT_GT(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_gt(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_le(v2.intbound):
@@ -145,8 +145,8 @@
self.emit_operation(op)
def optimize_INT_LE(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_le(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_gt(v2.intbound):
@@ -155,8 +155,8 @@
self.emit_operation(op)
def optimize_INT_GE(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_ge(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_lt(v2.intbound):
@@ -165,134 +165,134 @@
self.emit_operation(op)
def optimize_INT_EQ(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_gt(v2.intbound):
self.make_constant_int(op.result, 0)
elif v1.intbound.known_lt(v2.intbound):
self.make_constant_int(op.result, 0)
- else:
+ else:
self.emit_operation(op)
-
+
def optimize_INT_NE(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_gt(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_lt(v2.intbound):
self.make_constant_int(op.result, 1)
- else:
+ else:
self.emit_operation(op)
-
- def make_int_lt(self, args):
- v1 = self.getvalue(args[0])
- v2 = self.getvalue(args[1])
+
+ def make_int_lt(self, box1, box2):
+ v1 = self.getvalue(box1)
+ v2 = self.getvalue(box2)
if v1.intbound.make_lt(v2.intbound):
- self.propagate_bounds_backward(args[0])
+ self.propagate_bounds_backward(box1)
if v2.intbound.make_gt(v1.intbound):
- self.propagate_bounds_backward(args[1])
-
+ self.propagate_bounds_backward(box2)
+
- def make_int_le(self, args):
- v1 = self.getvalue(args[0])
- v2 = self.getvalue(args[1])
+ def make_int_le(self, box1, box2):
+ v1 = self.getvalue(box1)
+ v2 = self.getvalue(box2)
if v1.intbound.make_le(v2.intbound):
- self.propagate_bounds_backward(args[0])
+ self.propagate_bounds_backward(box1)
if v2.intbound.make_ge(v1.intbound):
- self.propagate_bounds_backward(args[1])
+ self.propagate_bounds_backward(box2)
- def make_int_gt(self, args):
- self.make_int_lt([args[1], args[0]])
+ def make_int_gt(self, box1, box2):
+ self.make_int_lt(box2, box1)
- def make_int_ge(self, args):
- self.make_int_le([args[1], args[0]])
+ def make_int_ge(self, box1, box2):
+ self.make_int_le(box2, box1)
def propagate_bounds_INT_LT(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_lt(op.args)
+ self.make_int_lt(op.getarg(0), op.getarg(1))
else:
- self.make_int_ge(op.args)
+ self.make_int_ge(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_GT(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_gt(op.args)
+ self.make_int_gt(op.getarg(0), op.getarg(1))
else:
- self.make_int_le(op.args)
+ self.make_int_le(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_LE(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_le(op.args)
+ self.make_int_le(op.getarg(0), op.getarg(1))
else:
- self.make_int_gt(op.args)
+ self.make_int_gt(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_GE(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_ge(op.args)
+ self.make_int_ge(op.getarg(0), op.getarg(1))
else:
- self.make_int_lt(op.args)
+ self.make_int_lt(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_EQ(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.intersect(v2.intbound):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
if v2.intbound.intersect(v1.intbound):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_NE(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_0):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.intersect(v2.intbound):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
if v2.intbound.intersect(v1.intbound):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_ADD(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op.result)
b = r.intbound.sub_bound(v2.intbound)
if v1.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
b = r.intbound.sub_bound(v1.intbound)
if v2.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_SUB(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op.result)
b = r.intbound.add_bound(v2.intbound)
if v1.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
b = r.intbound.sub_bound(v1.intbound).mul(-1)
if v2.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_MUL(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op.result)
b = r.intbound.div_bound(v2.intbound)
if v1.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
b = r.intbound.div_bound(v1.intbound)
if v2.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD
propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB
@@ -300,4 +300,4 @@
optimize_ops = _findall(OptIntBounds, 'optimize_')
propagate_bounds_ops = _findall(OptIntBounds, 'propagate_bounds_')
-
+
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 16 13:52:39 2010
@@ -16,12 +16,12 @@
LEVEL_UNKNOWN = '\x00'
LEVEL_NONNULL = '\x01'
LEVEL_KNOWNCLASS = '\x02' # might also mean KNOWNARRAYDESCR, for arrays
-LEVEL_CONSTANT = '\x03'
+LEVEL_CONSTANT = '\x03'
import sys
MAXINT = sys.maxint
MININT = -sys.maxint - 1
-
+
class OptValue(object):
_attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound')
last_guard_index = -1
@@ -36,7 +36,7 @@
if isinstance(box, Const):
self.make_constant(box)
# invariant: box is a Const if and only if level == LEVEL_CONSTANT
-
+
def force_box(self):
return self.box
@@ -171,7 +171,7 @@
def new_const_item(self, arraydescr):
return self.optimizer.new_const_item(arraydescr)
-
+
def pure(self, opnum, args, result):
op = ResOperation(opnum, args, result)
self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op
@@ -184,7 +184,7 @@
def setup(self, virtuals):
pass
-
+
class Optimizer(Optimization):
def __init__(self, metainterp_sd, loop, optimizations=[], virtuals=True):
@@ -199,7 +199,7 @@
self.pure_operations = args_dict()
self.producer = {}
self.pendingfields = []
-
+
if len(optimizations) == 0:
self.first_optimization = self
else:
@@ -323,11 +323,11 @@
self._emit_operation(op)
def _emit_operation(self, op):
- for i in range(len(op.args)):
- arg = op.args[i]
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if arg in self.values:
box = self.values[arg].force_box()
- op.args[i] = box
+ op.setarg(i, box)
self.metainterp_sd.profiler.count(jitprof.OPT_OPS)
if op.is_guard():
self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS)
@@ -368,14 +368,16 @@
descr.make_a_counter_per_value(op)
def make_args_key(self, op):
- args = op.args[:]
- for i in range(len(args)):
- arg = args[i]
+ args = []
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if arg in self.values:
- args[i] = self.values[arg].get_key_box()
+ args.append(self.values[arg].get_key_box())
+ else:
+ args.append(arg)
args.append(ConstInt(op.opnum))
return args
-
+
def optimize_default(self, op):
canfold = op.is_always_pure()
is_ovf = op.is_ovf()
@@ -383,8 +385,8 @@
nextop = self.loop.operations[self.i + 1]
canfold = nextop.opnum == rop.GUARD_NO_OVERFLOW
if canfold:
- for arg in op.args:
- if self.get_constant_box(arg) is None:
+ for i in range(op.numargs()):
+ if self.get_constant_box(op.getarg(i)) is None:
break
else:
# all constant arguments: constant-fold away
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/rewrite.py Thu Sep 16 13:52:39 2010
@@ -67,16 +67,16 @@
return False
def optimize_INT_AND(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.is_null() or v2.is_null():
self.make_constant_int(op.result, 0)
else:
self.emit_operation(op)
def optimize_INT_OR(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.is_null():
self.make_equal_to(op.result, v2)
elif v2.is_null():
@@ -85,20 +85,20 @@
self.emit_operation(op)
def optimize_INT_SUB(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v2.is_constant() and v2.box.getint() == 0:
self.make_equal_to(op.result, v1)
else:
self.emit_operation(op)
# Synthesize the reverse ops for optimize_default to reuse
- self.pure(rop.INT_ADD, [op.result, op.args[1]], op.args[0])
- self.pure(rop.INT_SUB, [op.args[0], op.result], op.args[1])
+ self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0))
+ self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1))
def optimize_INT_ADD(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
# If one side of the op is 0 the result is the other side.
if v1.is_constant() and v1.box.getint() == 0:
@@ -109,12 +109,12 @@
self.emit_operation(op)
# Synthesize the reverse op for optimize_default to reuse
- self.pure(rop.INT_SUB, [op.result, op.args[1]], op.args[0])
- self.pure(rop.INT_SUB, [op.result, op.args[0]], op.args[1])
+ self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0))
+ self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1))
def optimize_INT_MUL(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
# If one side of the op is 1 the result is the other side.
if v1.is_constant() and v1.box.getint() == 1:
@@ -133,13 +133,13 @@
break
else:
# all constant arguments: constant-fold away
- self.make_constant(op.result, op.args[0])
+ self.make_constant(op.result, op.getarg(0))
return
# replace CALL_PURE with just CALL
self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
op.descr))
def optimize_guard(self, op, constbox, emit_operation=True):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_constant():
box = value.box
assert isinstance(box, Const)
@@ -151,7 +151,7 @@
value.make_constant(constbox)
def optimize_GUARD_ISNULL(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_null():
return
elif value.is_nonnull():
@@ -160,7 +160,7 @@
value.make_constant(self.optimizer.cpu.ts.CONST_NULL)
def optimize_GUARD_NONNULL(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_nonnull():
return
elif value.is_null():
@@ -169,7 +169,7 @@
value.make_nonnull(len(self.optimizer.newoperations) - 1)
def optimize_GUARD_VALUE(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
emit_operation = True
if value.last_guard_index != -1:
# there already has been a guard_nonnull or guard_class or
@@ -178,7 +178,7 @@
old_guard_op = self.optimizer.newoperations[value.last_guard_index]
old_opnum = old_guard_op.opnum
old_guard_op.opnum = rop.GUARD_VALUE
- old_guard_op.args = [old_guard_op.args[0], op.args[1]]
+ old_guard_op.args = [old_guard_op.getarg(0), op.getarg(1)]
# hack hack hack. Change the guard_opnum on
# old_guard_op.descr so that when resuming,
# the operation is not skipped by pyjitpl.py.
@@ -187,7 +187,7 @@
descr.guard_opnum = rop.GUARD_VALUE
descr.make_a_counter_per_value(old_guard_op)
emit_operation = False
- constbox = op.args[1]
+ constbox = op.getarg(1)
assert isinstance(constbox, Const)
self.optimize_guard(op, constbox, emit_operation)
@@ -198,8 +198,8 @@
self.optimize_guard(op, CONST_0)
def optimize_GUARD_CLASS(self, op):
- value = self.getvalue(op.args[0])
- expectedclassbox = op.args[1]
+ value = self.getvalue(op.getarg(0))
+ expectedclassbox = op.getarg(1)
assert isinstance(expectedclassbox, Const)
realclassbox = value.get_constant_class(self.optimizer.cpu)
if realclassbox is not None:
@@ -217,7 +217,7 @@
# it was a guard_nonnull, which we replace with a
# guard_nonnull_class.
old_guard_op.opnum = rop.GUARD_NONNULL_CLASS
- old_guard_op.args = [old_guard_op.args[0], op.args[1]]
+ old_guard_op.args = [old_guard_op.getarg(0), op.getarg(1)]
# hack hack hack. Change the guard_opnum on
# old_guard_op.descr so that when resuming,
# the operation is not skipped by pyjitpl.py.
@@ -239,11 +239,11 @@
self.optimizer.exception_might_have_happened = False
def optimize_CALL_LOOPINVARIANT(self, op):
- funcvalue = self.getvalue(op.args[0])
+ funcvalue = self.getvalue(op.getarg(0))
if not funcvalue.is_constant():
self.emit_operation(op)
return
- key = make_hashable_int(op.args[0].getint())
+ key = make_hashable_int(op.getarg(0).getint())
resvalue = self.optimizer.loop_invariant_results.get(key, None)
if resvalue is not None:
self.make_equal_to(op.result, resvalue)
@@ -265,17 +265,17 @@
self.emit_operation(op)
def optimize_INT_IS_TRUE(self, op):
- if self.getvalue(op.args[0]) in self.optimizer.bool_boxes:
- self.make_equal_to(op.result, self.getvalue(op.args[0]))
+ if self.getvalue(op.getarg(0)) in self.optimizer.bool_boxes:
+ self.make_equal_to(op.result, self.getvalue(op.getarg(0)))
return
- self._optimize_nullness(op, op.args[0], True)
+ self._optimize_nullness(op, op.getarg(0), True)
def optimize_INT_IS_ZERO(self, op):
- self._optimize_nullness(op, op.args[0], False)
+ self._optimize_nullness(op, op.getarg(0), False)
def _optimize_oois_ooisnot(self, op, expect_isnot):
- value0 = self.getvalue(op.args[0])
- value1 = self.getvalue(op.args[1])
+ value0 = self.getvalue(op.getarg(0))
+ value1 = self.getvalue(op.getarg(1))
if value0.is_virtual():
if value1.is_virtual():
intres = (value0 is value1) ^ expect_isnot
@@ -285,9 +285,9 @@
elif value1.is_virtual():
self.make_constant_int(op.result, expect_isnot)
elif value1.is_null():
- self._optimize_nullness(op, op.args[0], expect_isnot)
+ self._optimize_nullness(op, op.getarg(0), expect_isnot)
elif value0.is_null():
- self._optimize_nullness(op, op.args[1], expect_isnot)
+ self._optimize_nullness(op, op.getarg(1), expect_isnot)
elif value0 is value1:
self.make_constant_int(op.result, not expect_isnot)
else:
@@ -308,7 +308,7 @@
self._optimize_oois_ooisnot(op, False)
def optimize_INSTANCEOF(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
realclassbox = value.get_constant_class(self.optimizer.cpu)
if realclassbox is not None:
checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr)
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/optimizeopt/virtualize.py Thu Sep 16 13:52:39 2010
@@ -258,7 +258,7 @@
def setup(self, virtuals):
if not virtuals:
return
-
+
inputargs = self.optimizer.loop.inputargs
specnodes = self.optimizer.loop.token.specnodes
assert len(inputargs) == len(specnodes)
@@ -288,15 +288,15 @@
target_loop_token = orgop.descr
assert isinstance(target_loop_token, LoopToken)
specnodes = target_loop_token.specnodes
- assert len(op.args) == len(specnodes)
+ assert op.numargs() == len(specnodes)
for i in range(len(specnodes)):
- value = self.getvalue(op.args[i])
+ value = self.getvalue(op.getarg(i))
specnodes[i].teardown_virtual_node(self, value, exitargs)
op.args = exitargs[:]
self.emit_operation(op)
def optimize_VIRTUAL_REF(self, op):
- indexbox = op.args[1]
+ indexbox = op.getarg(1)
#
# get some constants
vrefinfo = self.optimizer.metainterp_sd.virtualref_info
@@ -322,17 +322,17 @@
# typically a PyPy PyFrame, and now is the end of its execution, so
# forcing it now does not have catastrophic effects.
vrefinfo = self.optimizer.metainterp_sd.virtualref_info
- # op.args[1] should really never point to null here
+ # op.getarg(1) should really never point to null here
# - set 'forced' to point to the real object
op1 = ResOperation(rop.SETFIELD_GC, op.args, None,
descr = vrefinfo.descr_forced)
self.optimize_SETFIELD_GC(op1)
# - set 'virtual_token' to TOKEN_NONE
- args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)]
+ args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)]
op1 = ResOperation(rop.SETFIELD_GC, args, None,
descr = vrefinfo.descr_virtual_token)
self.optimize_SETFIELD_GC(op1)
- # Note that in some cases the virtual in op.args[1] has been forced
+ # Note that in some cases the virtual in op.getarg(1) has been forced
# already. This is fine. In that case, and *if* a residual
# CALL_MAY_FORCE suddenly turns out to access it, then it will
# trigger a ResumeGuardForcedDescr.handle_async_forcing() which
@@ -340,7 +340,7 @@
# was already forced).
def optimize_GETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
# optimizefindnode should ensure that fieldvalue is found
assert isinstance(value, AbstractVirtualValue)
@@ -357,8 +357,8 @@
optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC
def optimize_SETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
- fieldvalue = self.getvalue(op.args[1])
+ value = self.getvalue(op.getarg(0))
+ fieldvalue = self.getvalue(op.getarg(1))
if value.is_virtual():
value.setfield(op.descr, fieldvalue)
else:
@@ -367,17 +367,17 @@
self.emit_operation(op)
def optimize_NEW_WITH_VTABLE(self, op):
- self.make_virtual(op.args[0], op.result, op)
+ self.make_virtual(op.getarg(0), op.result, op)
def optimize_NEW(self, op):
self.make_vstruct(op.descr, op.result, op)
def optimize_NEW_ARRAY(self, op):
- sizebox = self.get_constant_box(op.args[0])
+ sizebox = self.get_constant_box(op.getarg(0))
if sizebox is not None:
# if the original 'op' did not have a ConstInt as argument,
# build a new one with the ConstInt argument
- if not isinstance(op.args[0], ConstInt):
+ if not isinstance(op.getarg(0), ConstInt):
op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result,
descr=op.descr)
self.make_varray(op.descr, sizebox.getint(), op.result, op)
@@ -386,7 +386,7 @@
self.emit_operation(op)
def optimize_ARRAYLEN_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
self.make_constant_int(op.result, value.getlength())
else:
@@ -395,9 +395,9 @@
self.emit_operation(op)
def optimize_GETARRAYITEM_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
- indexbox = self.get_constant_box(op.args[1])
+ indexbox = self.get_constant_box(op.getarg(1))
if indexbox is not None:
itemvalue = value.getitem(indexbox.getint())
self.make_equal_to(op.result, itemvalue)
@@ -411,22 +411,22 @@
optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC
def optimize_SETARRAYITEM_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
- indexbox = self.get_constant_box(op.args[1])
+ indexbox = self.get_constant_box(op.getarg(1))
if indexbox is not None:
- value.setitem(indexbox.getint(), self.getvalue(op.args[2]))
+ value.setitem(indexbox.getint(), self.getvalue(op.getarg(2)))
return
value.ensure_nonnull()
###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue)
self.emit_operation(op)
def optimize_ARRAYCOPY(self, op):
- source_value = self.getvalue(op.args[2])
- dest_value = self.getvalue(op.args[3])
- source_start_box = self.get_constant_box(op.args[4])
- dest_start_box = self.get_constant_box(op.args[5])
- length = self.get_constant_box(op.args[6])
+ source_value = self.getvalue(op.getarg(2))
+ dest_value = self.getvalue(op.getarg(3))
+ source_start_box = self.get_constant_box(op.getarg(4))
+ dest_start_box = self.get_constant_box(op.getarg(5))
+ length = self.get_constant_box(op.getarg(6))
if (source_value.is_virtual() and source_start_box and dest_start_box
and length and dest_value.is_virtual()):
# XXX optimize the case where dest value is not virtual,
@@ -439,9 +439,10 @@
return
if length and length.getint() == 0:
return # 0-length arraycopy
- descr = op.args[0]
+ descr = op.getarg(0)
assert isinstance(descr, AbstractDescr)
- self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
+ args = [op.getarg(i) for i in range(1, op.numargs())]
+ self.emit_operation(ResOperation(rop.CALL, args, op.result,
descr))
def propagate_forward(self, op):
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/pyjitpl.py Thu Sep 16 13:52:39 2010
@@ -159,7 +159,7 @@
if got_type == history.INT:
self.registers_i[target_index] = resultbox
elif got_type == history.REF:
- #debug_print(' ->',
+ #debug_print(' ->',
# llmemory.cast_ptr_to_adr(resultbox.getref_base()))
self.registers_r[target_index] = resultbox
elif got_type == history.FLOAT:
@@ -446,7 +446,7 @@
def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr,
sizebox):
sbox = self.metainterp.execute_and_record(rop.NEW, structdescr)
- self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr,
+ self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr,
sbox, sizebox)
abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr,
sizebox)
@@ -1004,7 +1004,7 @@
resumedescr = compile.ResumeGuardDescr(metainterp_sd,
original_greenkey)
guard_op = metainterp.history.record(opnum, moreargs, None,
- descr=resumedescr)
+ descr=resumedescr)
virtualizable_boxes = None
if metainterp.jitdriver_sd.virtualizable_info is not None:
virtualizable_boxes = metainterp.virtualizable_boxes
@@ -1463,7 +1463,7 @@
resbox = self._record_helper_nonpure_varargs(opnum, resbox, descr, argboxes)
return resbox
- def _record_helper_pure(self, opnum, resbox, descr, *argboxes):
+ def _record_helper_pure(self, opnum, resbox, descr, *argboxes):
canfold = self._all_constants(*argboxes)
if canfold:
resbox = resbox.constbox() # ensure it is a Const
@@ -1472,7 +1472,7 @@
resbox = resbox.nonconstbox() # ensure it is a Box
return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes))
- def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes):
+ def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes):
canfold = self._all_constants_varargs(argboxes)
if canfold:
resbox = resbox.constbox() # ensure it is a Const
@@ -1485,7 +1485,7 @@
assert resbox is None or isinstance(resbox, Box)
# record the operation
profiler = self.staticdata.profiler
- profiler.count_ops(opnum, RECORDED_OPS)
+ profiler.count_ops(opnum, RECORDED_OPS)
op = self.history.record(opnum, argboxes, resbox, descr)
self.attach_debug_info(op)
return resbox
@@ -1667,7 +1667,7 @@
# Search in current_merge_points for original_boxes with compatible
# green keys, representing the beginning of the same loop as the one
- # we end now.
+ # we end now.
num_green_args = self.jitdriver_sd.num_green_args
for j in range(len(self.current_merge_points)-1, -1, -1):
@@ -2090,8 +2090,8 @@
op = self.history.operations[-1]
assert op.opnum == rop.CALL
resbox_as_const = resbox.constbox()
- for arg in op.args:
- if not isinstance(arg, Const):
+ for i in range(op.numargs()):
+ if not isinstance(op.getarg(i), Const):
break
else:
# all-constants: remove the CALL operation now and propagate a
@@ -2101,7 +2101,8 @@
# not all constants (so far): turn CALL into CALL_PURE, which might
# be either removed later by optimizeopt or turned back into CALL.
op.opnum = rop.CALL_PURE
- op.args = [resbox_as_const] + op.args
+ # XXX XXX replace...
+ op._args = [resbox_as_const] + op._args
return resbox
def direct_assembler_call(self, targetjitdriver_sd):
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/resoperation.py Thu Sep 16 13:52:39 2010
@@ -15,12 +15,21 @@
make_sure_not_resized(args)
assert isinstance(opnum, int)
self.opnum = opnum
- self.args = list(args)
- make_sure_not_resized(self.args)
+ self._args = list(args)
+ make_sure_not_resized(self._args)
assert not isinstance(result, list)
self.result = result
self.setdescr(descr)
+ def getarg(self, i):
+ return self._args[i]
+
+ def setarg(self, i, box):
+ self._args[i] = box
+
+ def numargs(self):
+ return len(self._args)
+
def setdescr(self, descr):
# for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt
# instance provided by the backend holding details about the type
@@ -35,10 +44,10 @@
descr = self.descr
if descr is not None:
descr = descr.clone_if_mutable()
- op = ResOperation(self.opnum, self.args, self.result, descr)
+ op = ResOperation(self.opnum, self._args, self.result, descr)
op.fail_args = self.fail_args
+ op.name = self.name
if not we_are_translated():
- op.name = self.name
op.pc = self.pc
return op
@@ -57,10 +66,10 @@
prefix = ""
if self.descr is None or we_are_translated():
return '%s%s%s(%s)' % (prefix, sres, self.getopname(),
- ', '.join([str(a) for a in self.args]))
+ ', '.join([str(a) for a in self._args]))
else:
return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(),
- ', '.join([str(a) for a in self.args]), self.descr)
+ ', '.join([str(a) for a in self._args]), self.descr)
def getopname(self):
try:
Modified: pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py
==============================================================================
--- pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py (original)
+++ pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 16 13:52:39 2010
@@ -33,7 +33,7 @@
self.profiler = EmptyProfiler()
self.options = Fake()
self.globaldata = Fake()
-
+
def test_store_final_boxes_in_guard():
from pypy.jit.metainterp.compile import ResumeGuardDescr
from pypy.jit.metainterp.resume import tag, TAGBOX
@@ -75,7 +75,7 @@
assert lst3 == [LLtypeMixin.valuedescr]
lst4 = virt1._get_field_descr_list()
assert lst3 is lst4
-
+
virt2 = virtualize.AbstractVirtualStructValue(opt, None)
lst5 = virt2._get_field_descr_list()
assert lst5 is lst1
@@ -141,8 +141,10 @@
txt1 = txt1[39:]
txt2 = txt2[39:]
assert op1.opnum == op2.opnum
- assert len(op1.args) == len(op2.args)
- for x, y in zip(op1.args, op2.args):
+ assert op1.numargs() == op2.numargs()
+ for i in range(op1.numargs()):
+ x = op1.getarg(i)
+ y = op2.getarg(i)
assert x == remap.get(y, y)
if op2.result in remap:
assert op1.result == remap[op2.result]
@@ -489,7 +491,7 @@
jump()
"""
self.optimize_loop(ops, 'Constant(myptr)', expected)
-
+
def test_ooisnull_oononnull_1(self):
ops = """
[p0]
@@ -842,7 +844,7 @@
jump(f, f1)
"""
self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)',
- expected, checkspecnodes=False)
+ expected, checkspecnodes=False)
def test_virtual_2(self):
ops = """
@@ -2171,7 +2173,7 @@
jump(i1, i0)
"""
self.optimize_loop(ops, 'Not, Not', expected)
-
+
def test_fold_partially_constant_ops(self):
ops = """
[i0]
@@ -2183,7 +2185,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
ops = """
[i0]
i1 = int_add(i0, 0)
@@ -2194,7 +2196,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
ops = """
[i0]
i1 = int_add(0, i0)
@@ -2205,7 +2207,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
# ----------
def make_fail_descr(self):
@@ -3119,7 +3121,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_noguard(self):
ops = """
[i0]
@@ -3134,7 +3136,7 @@
jump(i2)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_noopt(self):
ops = """
[i0]
@@ -3153,7 +3155,7 @@
jump(4)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_rev(self):
ops = """
[i0]
@@ -3170,7 +3172,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_tripple(self):
ops = """
[i0]
@@ -3189,7 +3191,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_add(self):
ops = """
[i0]
@@ -3204,11 +3206,11 @@
[i0]
i1 = int_lt(i0, 4)
guard_true(i1) []
- i2 = int_add(i0, 10)
+ i2 = int_add(i0, 10)
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_add_before(self):
ops = """
[i0]
@@ -3227,7 +3229,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_add_ovf(self):
ops = """
[i0]
@@ -3243,11 +3245,11 @@
[i0]
i1 = int_lt(i0, 4)
guard_true(i1) []
- i2 = int_add(i0, 10)
+ i2 = int_add(i0, 10)
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_add_ovf_before(self):
ops = """
[i0]
@@ -3268,7 +3270,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_sub(self):
ops = """
[i0]
@@ -3283,11 +3285,11 @@
[i0]
i1 = int_lt(i0, 4)
guard_true(i1) []
- i2 = int_sub(i0, 10)
+ i2 = int_sub(i0, 10)
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_sub_before(self):
ops = """
[i0]
@@ -3306,7 +3308,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_ltle(self):
ops = """
[i0]
@@ -3357,7 +3359,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_gtge(self):
ops = """
[i0]
@@ -3374,7 +3376,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_gegt(self):
ops = """
[i0]
@@ -3558,7 +3560,7 @@
i14 = int_gt(i1, 10)
guard_true(i14) []
i15 = int_ge(i1, 20)
- guard_true(i15) []
+ guard_true(i15) []
jump(i1)
"""
expected = """
@@ -3571,7 +3573,7 @@
i14 = int_gt(i1, 10)
guard_true(i14) []
i15 = int_ge(i1, 20)
- guard_true(i15) []
+ guard_true(i15) []
jump(i1)
"""
self.optimize_loop(ops, 'Not', expected)
@@ -3818,7 +3820,7 @@
"""
self.optimize_loop(ops, 'Not, Not', expected)
-
+
##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin):
@@ -3835,7 +3837,7 @@
## jump(1)
## """
## self.optimize_loop(ops, 'Not', expected)
-
+
## def test_instanceof_guard_class(self):
## ops = """
## [i0, p0]
More information about the Pypy-commit
mailing list