[pypy-svn] r62888 - in pypy/branch/pyjitpl5/pypy/jit: backend/llgraph metainterp metainterp/test
arigo at codespeak.net
arigo at codespeak.net
Thu Mar 12 15:11:20 CET 2009
Author: arigo
Date: Thu Mar 12 15:11:19 2009
New Revision: 62888
Modified:
pypy/branch/pyjitpl5/pypy/jit/backend/llgraph/llimpl.py
pypy/branch/pyjitpl5/pypy/jit/backend/llgraph/runner.py
pypy/branch/pyjitpl5/pypy/jit/metainterp/codewriter.py
pypy/branch/pyjitpl5/pypy/jit/metainterp/history.py
pypy/branch/pyjitpl5/pypy/jit/metainterp/optimize.py
pypy/branch/pyjitpl5/pypy/jit/metainterp/pyjitpl.py
pypy/branch/pyjitpl5/pypy/jit/metainterp/resoperation.py
pypy/branch/pyjitpl5/pypy/jit/metainterp/specnode.py
pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_list_optimize.py
pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_optimize.py
pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_vable_optimize.py
Log:
Unify the interface: pass instances of Descr all around,
instead of converting between them and integers in optimize.py.
Modified: pypy/branch/pyjitpl5/pypy/jit/backend/llgraph/llimpl.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/backend/llgraph/llimpl.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/backend/llgraph/llimpl.py Thu Mar 12 15:11:19 2009
@@ -135,11 +135,11 @@
return '\n'.join(lines)
class Operation(object):
- def __init__(self, opnum, descr):
+ def __init__(self, opnum):
self.opnum = opnum
self.args = []
self.result = None
- self.descr = descr
+ self.descr = None
self.livevars = [] # for guards only
def __repr__(self):
@@ -220,7 +220,7 @@
assert x == 0 or x == 1
return str(bool(x))
#elif tp == 'fieldname':
- # return str(symbolic.TokenToField[x/2][1])
+ # return str(symbolic.TokenToField[x...][1])
else:
raise NotImplementedError("tp = %s" % tp)
@@ -248,9 +248,16 @@
_variables.append(v)
return r
-def compile_add(loop, opnum, descr):
+def compile_add(loop, opnum):
loop = _from_opaque(loop)
- loop.operations.append(Operation(opnum, descr))
+ loop.operations.append(Operation(opnum))
+
+def compile_add_descr(loop, ofs, type):
+ from pypy.jit.backend.llgraph.runner import Descr
+ loop = _from_opaque(loop)
+ op = loop.operations[-1]
+ assert isinstance(type, str) and len(type) == 1
+ op.descr = Descr(ofs, type)
def compile_add_var(loop, intvar):
loop = _from_opaque(loop)
@@ -520,61 +527,62 @@
# delegating to the builtins do_xxx() (done automatically for simple cases)
def op_getarrayitem_gc(self, arraydescr, array, index):
- if arraydescr.getint() & 1:
+ if arraydescr.type == 'p':
return do_getarrayitem_gc_ptr(array, index)
else:
return do_getarrayitem_gc_int(array, index, self.memocast)
def op_getfield_gc(self, fielddescr, struct):
- fielddescr = fielddescr.getint()
- if fielddescr & 1:
- return do_getfield_gc_ptr(struct, fielddescr)
+ if fielddescr.type == 'p':
+ return do_getfield_gc_ptr(struct, fielddescr.ofs)
else:
- return do_getfield_gc_int(struct, fielddescr, self.memocast)
+ return do_getfield_gc_int(struct, fielddescr.ofs, self.memocast)
def op_getfield_raw(self, fielddescr, struct):
- if fielddescr.getint() & 1:
- return do_getfield_raw_ptr(struct, fielddescr)
+ if fielddescr.type == 'p':
+ return do_getfield_raw_ptr(struct, fielddescr.ofs)
else:
- return do_getfield_raw_int(struct, fielddescr, self.memocast)
+ return do_getfield_raw_int(struct, fielddescr.ofs, self.memocast)
def op_new_with_vtable(self, size, vtable):
- result = do_new(size.getint())
+ result = do_new(size.ofs)
value = lltype.cast_opaque_ptr(rclass.OBJECTPTR, result)
value.typeptr = cast_from_int(rclass.CLASSTYPE, vtable, self.memocast)
return result
def op_setarrayitem_gc(self, arraydescr, array, index, newvalue):
- if arraydescr.getint() & 1:
+ if arraydescr.type == 'p':
do_setarrayitem_gc_ptr(array, index, newvalue)
else:
do_setarrayitem_gc_int(array, index, newvalue, self.memocast)
def op_setfield_gc(self, fielddescr, struct, newvalue):
- fielddescr = fielddescr.getint()
- if fielddescr & 1:
- do_setfield_gc_ptr(struct, fielddescr, newvalue)
+ if fielddescr.type == 'p':
+ do_setfield_gc_ptr(struct, fielddescr.ofs, newvalue)
else:
- do_setfield_gc_int(struct, fielddescr, newvalue, self.memocast)
+ do_setfield_gc_int(struct, fielddescr.ofs, newvalue,
+ self.memocast)
def op_setfield_raw(self, fielddescr, struct, newvalue):
- if fielddescr.getint() & 1:
- do_setfield_raw_ptr(struct, fielddescr, newvalue)
+ if fielddescr.type == 'p':
+ do_setfield_raw_ptr(struct, fielddescr.ofs, newvalue)
else:
- do_setfield_raw_int(struct, fielddescr, newvalue, self.memocast)
+ do_setfield_raw_int(struct, fielddescr.ofs, newvalue,
+ self.memocast)
def op_call(self, calldescr, func, *args):
_call_args[:] = args
- if calldescr == sys.maxint:
+ if calldescr.type == 'v':
err_result = None
- elif calldescr.getint() & 1:
+ elif calldescr.type == 'p':
err_result = lltype.nullptr(llmemory.GCREF.TO)
else:
+ assert calldescr.type == 'i'
err_result = 0
return _do_call_common(func, self.memocast, err_result)
def op_new_array(self, arraydescr, count):
- return do_new_array(arraydescr.getint(), count)
+ return do_new_array(arraydescr.ofs, count)
# ____________________________________________________________
@@ -748,26 +756,26 @@
array = array._obj.container
return cast_to_ptr(array.getitem(index))
-def do_getfield_gc_int(struct, fielddesc, memocast):
- STRUCT, fieldname = symbolic.TokenToField[fielddesc/2]
+def do_getfield_gc_int(struct, fieldnum, memocast):
+ STRUCT, fieldname = symbolic.TokenToField[fieldnum]
ptr = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), struct)
x = getattr(ptr, fieldname)
return cast_to_int(x, memocast)
-def do_getfield_gc_ptr(struct, fielddesc):
- STRUCT, fieldname = symbolic.TokenToField[fielddesc/2]
+def do_getfield_gc_ptr(struct, fieldnum):
+ STRUCT, fieldname = symbolic.TokenToField[fieldnum]
ptr = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), struct)
x = getattr(ptr, fieldname)
return cast_to_ptr(x)
-def do_getfield_raw_int(struct, fielddesc, memocast):
- STRUCT, fieldname = symbolic.TokenToField[fielddesc/2]
+def do_getfield_raw_int(struct, fieldnum, memocast):
+ STRUCT, fieldname = symbolic.TokenToField[fieldnum]
ptr = llmemory.cast_adr_to_ptr(struct, lltype.Ptr(STRUCT))
x = getattr(ptr, fieldname)
return cast_to_int(x, memocast)
-def do_getfield_raw_ptr(struct, fielddesc):
- STRUCT, fieldname = symbolic.TokenToField[fielddesc/2]
+def do_getfield_raw_ptr(struct, fieldnum):
+ STRUCT, fieldname = symbolic.TokenToField[fieldnum]
ptr = llmemory.cast_adr_to_ptr(struct, lltype.Ptr(STRUCT))
x = getattr(ptr, fieldname)
return cast_to_ptr(x)
@@ -777,8 +785,8 @@
x = lltype.malloc(TYPE)
return cast_to_ptr(x)
-def do_new_array(arraydesc, count):
- TYPE = symbolic.Size2Type[arraydesc/2]
+def do_new_array(arraynum, count):
+ TYPE = symbolic.Size2Type[arraynum]
x = lltype.malloc(TYPE, count)
return cast_to_ptr(x)
@@ -794,29 +802,29 @@
newvalue = cast_from_ptr(ITEMTYPE, newvalue)
array.setitem(index, newvalue)
-def do_setfield_gc_int(struct, fielddesc, newvalue, memocast):
- STRUCT, fieldname = symbolic.TokenToField[fielddesc/2]
+def do_setfield_gc_int(struct, fieldnum, newvalue, memocast):
+ STRUCT, fieldname = symbolic.TokenToField[fieldnum]
ptr = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), struct)
FIELDTYPE = getattr(STRUCT, fieldname)
newvalue = cast_from_int(FIELDTYPE, newvalue, memocast)
setattr(ptr, fieldname, newvalue)
-def do_setfield_gc_ptr(struct, fielddesc, newvalue):
- STRUCT, fieldname = symbolic.TokenToField[fielddesc/2]
+def do_setfield_gc_ptr(struct, fieldnum, newvalue):
+ STRUCT, fieldname = symbolic.TokenToField[fieldnum]
ptr = lltype.cast_opaque_ptr(lltype.Ptr(STRUCT), struct)
FIELDTYPE = getattr(STRUCT, fieldname)
newvalue = cast_from_ptr(FIELDTYPE, newvalue)
setattr(ptr, fieldname, newvalue)
-def do_setfield_raw_int(struct, fielddesc, newvalue, memocast):
- STRUCT, fieldname = symbolic.TokenToField[fielddesc/2]
+def do_setfield_raw_int(struct, fieldnum, newvalue, memocast):
+ STRUCT, fieldname = symbolic.TokenToField[fieldnum]
ptr = llmemory.cast_adr_to_ptr(struct, lltype.Ptr(STRUCT))
FIELDTYPE = getattr(STRUCT, fieldname)
newvalue = cast_from_int(FIELDTYPE, newvalue, memocast)
setattr(ptr, fieldname, newvalue)
-def do_setfield_raw_ptr(struct, fielddesc, newvalue):
- STRUCT, fieldname = symbolic.TokenToField[fielddesc/2]
+def do_setfield_raw_ptr(struct, fieldnum, newvalue):
+ STRUCT, fieldname = symbolic.TokenToField[fieldnum]
ptr = llmemory.cast_adr_to_ptr(struct, lltype.Ptr(STRUCT))
FIELDTYPE = getattr(STRUCT, fieldname)
newvalue = cast_from_ptr(FIELDTYPE, newvalue)
@@ -935,6 +943,7 @@
setannotation(compile_start_int_var, annmodel.SomeInteger())
setannotation(compile_start_ptr_var, annmodel.SomeInteger())
setannotation(compile_add, annmodel.s_None)
+setannotation(compile_add_descr, annmodel.s_None)
setannotation(compile_add_var, annmodel.s_None)
setannotation(compile_add_int_const, annmodel.s_None)
setannotation(compile_add_ptr_const, annmodel.s_None)
Modified: pypy/branch/pyjitpl5/pypy/jit/backend/llgraph/runner.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/backend/llgraph/runner.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/backend/llgraph/runner.py Thu Mar 12 15:11:19 2009
@@ -14,6 +14,40 @@
pass
+class Descr(history.AbstractValue):
+ def __init__(self, ofs, type='?'):
+ self.ofs = ofs
+ self.type = type
+
+ def __hash__(self):
+ return hash((self.ofs, self.type))
+
+ def __eq__(self, other):
+ if not isinstance(other, Descr):
+ return NotImplemented
+ return self.ofs == other.ofs and self.type == other.type
+
+ def __ne__(self, other):
+ if not isinstance(other, Descr):
+ return NotImplemented
+ return self.ofs != other.ofs or self.type != other.type
+
+ def sort_key(self):
+ return self.ofs
+
+ def __lt__(self, other):
+ raise TypeError("cannot use comparison on Descrs")
+ def __le__(self, other):
+ raise TypeError("cannot use comparison on Descrs")
+ def __gt__(self, other):
+ raise TypeError("cannot use comparison on Descrs")
+ def __ge__(self, other):
+ raise TypeError("cannot use comparison on Descrs")
+
+ def __repr__(self):
+ return '<Descr %r, %r>' % (self.ofs, self.type)
+
+
class CPU(object):
def __init__(self, rtyper, stats=None, translate_support_code=False,
@@ -64,7 +98,9 @@
op._compiled = c
op._opindex = j
j += 1
- llimpl.compile_add(c, op.opnum, op.descr)
+ llimpl.compile_add(c, op.opnum)
+ if op.descr is not None:
+ llimpl.compile_add_descr(c, op.descr.ofs, op.descr.type)
for x in op.args:
if isinstance(x, history.Box):
llimpl.compile_add_var(c, var2index[x])
@@ -186,7 +222,7 @@
@staticmethod
def sizeof(S):
- return history.ConstInt(symbolic.get_size(S))
+ return Descr(symbolic.get_size(S))
@staticmethod
def numof(S):
@@ -198,51 +234,19 @@
def fielddescrof(S, fieldname):
ofs, size = symbolic.get_field_token(S, fieldname)
token = history.getkind(getattr(S, fieldname))
- if token == 'ptr':
- bit = 1
- else:
- bit = 0
- return history.ConstInt(ofs*2 + bit)
+ return Descr(ofs, token[0])
@staticmethod
def arraydescrof(A):
assert isinstance(A, lltype.GcArray)
size = symbolic.get_size(A)
token = history.getkind(A.OF)
- if token == 'ptr':
- bit = 1
- else:
- bit = 0
- return history.ConstInt(size*2 + bit)
+ return Descr(size, token[0])
@staticmethod
def calldescrof(ARGS, RESULT):
- if RESULT is lltype.Void:
- return sys.maxint
token = history.getkind(RESULT)
- if token == 'ptr':
- return 1
- else:
- return 0
-
- @staticmethod
- def typefor(fielddesc):
- fielddesc = fielddesc.getint()
- if fielddesc == sys.maxint:
- return 'void'
- if fielddesc % 2:
- return 'ptr'
- return 'int'
-
- @staticmethod
- def itemoffsetof(A):
- basesize, itemsize, ofs_length = symbolic.get_array_token(A)
- return basesize
-
- @staticmethod
- def arraylengthoffset(A):
- basesize, itemsize, ofs_length = symbolic.get_array_token(A)
- return ofs_length
+ return Descr(0, token[0])
def cast_adr_to_int(self, adr):
return llimpl.cast_adr_to_int(self.memo_cast, adr)
@@ -250,23 +254,17 @@
def cast_int_to_adr(self, int):
return llimpl.cast_int_to_adr(self.memo_cast, int)
- def ofs_from_descr(self, descr):
- return descr.getint()
-
- def repack_descr(self, ofs):
- return history.ConstInt(ofs)
-
# ---------- the backend-dependent operations ----------
def do_arraylen_gc(self, args, arraydescr):
array = args[0].getptr_base()
return history.BoxInt(llimpl.do_arraylen_gc(arraydescr, array))
- def do_strlen(self, args, descr=0):
+ def do_strlen(self, args, descr=None):
string = args[0].getptr_base()
return history.BoxInt(llimpl.do_strlen(0, string))
- def do_strgetitem(self, args, descr=0):
+ def do_strgetitem(self, args, descr=None):
string = args[0].getptr_base()
index = args[1].getint()
return history.BoxInt(llimpl.do_strgetitem(0, string, index))
@@ -274,51 +272,50 @@
def do_getarrayitem_gc(self, args, arraydescr):
array = args[0].getptr_base()
index = args[1].getint()
- if self.typefor(arraydescr) == 'ptr':
+ if arraydescr.type == 'p':
return history.BoxPtr(llimpl.do_getarrayitem_gc_ptr(array, index))
else:
return history.BoxInt(llimpl.do_getarrayitem_gc_int(array, index,
self.memo_cast))
- def do_getfield_gc(self, args, fieldbox):
- fielddescr = fieldbox.getint()
+ def do_getfield_gc(self, args, fielddescr):
struct = args[0].getptr_base()
- if self.typefor(fieldbox) == 'ptr':
+ if fielddescr.type == 'p':
return history.BoxPtr(llimpl.do_getfield_gc_ptr(struct,
- fielddescr))
+ fielddescr.ofs))
else:
return history.BoxInt(llimpl.do_getfield_gc_int(struct,
- fielddescr,
+ fielddescr.ofs,
self.memo_cast))
def do_getfield_raw(self, args, fielddescr):
struct = self.cast_int_to_adr(args[0].getint())
- if self.typefor(fielddescr) == 'ptr':
+ if fielddescr.type == 'p':
return history.BoxPtr(llimpl.do_getfield_raw_ptr(struct,
- fielddescr))
+ fielddescr.ofs))
else:
return history.BoxInt(llimpl.do_getfield_raw_int(struct,
- fielddescr,
+ fielddescr.ofs,
self.memo_cast))
def do_new(self, args, size):
- return history.BoxPtr(llimpl.do_new(size.getint()))
+ return history.BoxPtr(llimpl.do_new(size.ofs))
def do_new_with_vtable(self, args, size):
vtable = args[0].getint()
- result = llimpl.do_new(size.getint())
- llimpl.do_setfield_gc_int(result, self.fielddescrof_vtable.getint(),
+ result = llimpl.do_new(size.ofs)
+ llimpl.do_setfield_gc_int(result, self.fielddescrof_vtable.ofs,
vtable, self.memo_cast)
return history.BoxPtr(result)
def do_new_array(self, args, size):
count = args[0].getint()
- return history.BoxPtr(llimpl.do_new_array(size.getint(), count))
+ return history.BoxPtr(llimpl.do_new_array(size.ofs, count))
def do_setarrayitem_gc(self, args, arraydescr):
array = args[0].getptr_base()
index = args[1].getint()
- if self.typefor(arraydescr) == 'ptr':
+ if arraydescr.type == 'p':
newvalue = args[2].getptr_base()
llimpl.do_setarrayitem_gc_ptr(array, index, newvalue)
else:
@@ -326,33 +323,31 @@
llimpl.do_setarrayitem_gc_int(array, index, newvalue,
self.memo_cast)
- def do_setfield_gc(self, args, fieldbox):
- fielddescr = fieldbox.getint()
+ def do_setfield_gc(self, args, fielddescr):
struct = args[0].getptr_base()
- if self.typefor(fieldbox) == 'ptr':
+ if fielddescr.type == 'p':
newvalue = args[1].getptr_base()
- llimpl.do_setfield_gc_ptr(struct, fielddescr, newvalue)
+ llimpl.do_setfield_gc_ptr(struct, fielddescr.ofs, newvalue)
else:
newvalue = args[1].getint()
- llimpl.do_setfield_gc_int(struct, fielddescr, newvalue,
+ llimpl.do_setfield_gc_int(struct, fielddescr.ofs, newvalue,
self.memo_cast)
def do_setfield_raw(self, args, fielddescr):
- fielddescr = fielddescr.getint()
struct = self.cast_int_to_adr(args[0].getint())
- if self.typefor(fielddescr) == 'ptr':
+ if fielddescr.type == 'p':
newvalue = args[1].getptr_base()
- llimpl.do_setfield_raw_ptr(struct, fielddescr, newvalue)
+ llimpl.do_setfield_raw_ptr(struct, fielddescr.ofs, newvalue)
else:
newvalue = args[1].getint()
- llimpl.do_setfield_raw_int(struct, fielddescr, newvalue,
+ llimpl.do_setfield_raw_int(struct, fielddescr.ofs, newvalue,
self.memo_cast)
- def do_newstr(self, args, descr=0):
+ def do_newstr(self, args, descr=None):
length = args[0].getint()
return history.BoxPtr(llimpl.do_newstr(0, length))
- def do_strsetitem(self, args, descr=0):
+ def do_strsetitem(self, args, descr=None):
string = args[0].getptr_base()
index = args[1].getint()
newvalue = args[2].getint()
@@ -366,12 +361,11 @@
llimpl.do_call_pushptr(arg.getptr_base())
else:
llimpl.do_call_pushint(arg.getint())
- restype = self.typefor(calldescr)
- if restype == 'ptr':
+ if calldescr.type == 'p':
return history.BoxPtr(llimpl.do_call_ptr(func, self.memo_cast))
- elif restype == 'int':
+ elif calldescr.type == 'i':
return history.BoxInt(llimpl.do_call_int(func, self.memo_cast))
- else: # restype == 'void'
+ else: # calldescr.type == 'v' # void
llimpl.do_call_void(func, self.memo_cast)
Modified: pypy/branch/pyjitpl5/pypy/jit/metainterp/codewriter.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/metainterp/codewriter.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/metainterp/codewriter.py Thu Mar 12 15:11:19 2009
@@ -234,8 +234,6 @@
Returns its index in the list self.positions[].
"""
if constvalue is _we_are_jitted: constvalue = True
- if isinstance(constvalue, history.Const):
- return self.get_position(constvalue)
const = Const._new(constvalue, self.cpu)
return self.get_position(const)
@@ -529,10 +527,10 @@
# store the vtable as an address -- that's fine, because the
# GC doesn't need to follow them
self.emit('new_with_vtable',
- self.const_position(self.cpu.sizeof(STRUCT)),
+ self.get_position(self.cpu.sizeof(STRUCT)),
self.const_position(vtable))
else:
- self.emit('new', self.const_position(self.cpu.sizeof(STRUCT)))
+ self.emit('new', self.get_position(self.cpu.sizeof(STRUCT)))
self.register_var(op.result)
def serialize_op_malloc_varsize(self, op):
@@ -544,7 +542,7 @@
ARRAY = op.args[0].value
arraydescr = self.cpu.arraydescrof(ARRAY)
self.emit('new_array')
- self.emit(self.const_position(arraydescr))
+ self.emit(self.get_position(arraydescr))
self.emit(self.var_position(op.args[2]))
self.register_var(op.result)
@@ -571,7 +569,7 @@
self.emit(self.var_position(v_inst))
offset = self.cpu.fielddescrof(v_inst.concretetype.TO,
c_fieldname.value)
- self.emit(self.const_position(offset))
+ self.emit(self.get_position(offset))
self.register_var(op.result)
#self._eventualy_builtin(op.result)
@@ -589,7 +587,7 @@
self.emit(self.var_position(v_inst))
offset = self.cpu.fielddescrof(v_inst.concretetype.TO,
c_fieldname.value)
- self.emit(self.const_position(offset))
+ self.emit(self.get_position(offset))
self.emit(self.var_position(v_value))
def is_typeptr_getset(self, op):
@@ -608,7 +606,7 @@
arraydescr = self.cpu.arraydescrof(ARRAY)
self.emit('getarrayitem_gc')
self.emit(self.var_position(op.args[0]))
- self.emit(self.const_position(arraydescr))
+ self.emit(self.get_position(arraydescr))
self.emit(self.var_position(op.args[1]))
self.register_var(op.result)
@@ -618,7 +616,7 @@
arraydescr = self.cpu.arraydescrof(ARRAY)
self.emit('setarrayitem_gc')
self.emit(self.var_position(op.args[0]))
- self.emit(self.const_position(arraydescr))
+ self.emit(self.get_position(arraydescr))
self.emit(self.var_position(op.args[1]))
self.emit(self.var_position(op.args[2]))
@@ -703,7 +701,7 @@
op.args[1:],
op.result)
self.emit('residual_call')
- self.emit(self.const_position(calldescr))
+ self.emit(self.get_position(calldescr))
self.emit_varargs([op.args[0]] + non_void_args)
self.register_var(op.result)
@@ -751,7 +749,7 @@
calldescr, non_void_args = self.codewriter.getcalldescr(c_func, args,
op.result)
self.emit(opname)
- self.emit(self.const_position(calldescr))
+ self.emit(self.get_position(calldescr))
self.emit_varargs([c_func] + non_void_args)
self.register_var(op.result)
@@ -775,7 +773,7 @@
v_default.value != TP.TO.OF._defl()):
return False # variable or non-null initial value
self.emit('new_array')
- self.emit(self.const_position(arraydescr))
+ self.emit(self.get_position(arraydescr))
self.emit(self.var_position(args[0]))
self.register_var(op.result)
return True
@@ -794,7 +792,7 @@
return False
self.emit('setarrayitem_gc')
self.emit(self.var_position(args[0]))
- self.emit(self.const_position(arraydescr))
+ self.emit(self.get_position(arraydescr))
self.emit(self.var_position(index))
self.emit(self.var_position(args[2]))
self.register_var(op.result)
@@ -804,7 +802,7 @@
oopspec_name == 'list.len_foldable'):
self.emit('arraylen_gc')
self.emit(self.var_position(args[0]))
- self.emit(self.const_position(arraydescr))
+ self.emit(self.get_position(arraydescr))
self.register_var(op.result)
return True
#
@@ -816,7 +814,7 @@
return False
self.emit(opname)
self.emit(self.var_position(args[0]))
- self.emit(self.const_position(arraydescr))
+ self.emit(self.get_position(arraydescr))
self.emit(self.var_position(index))
self.register_var(op.result)
return True
@@ -841,7 +839,7 @@
else:
self.emit('check_neg_index')
self.emit(self.var_position(args[0]))
- self.emit(self.const_position(arraydescr))
+ self.emit(self.get_position(arraydescr))
self.emit(self.var_position(args[1]))
v_posindex = Variable('posindex')
v_posindex.concretetype = lltype.Signed
@@ -883,7 +881,7 @@
self.emit('guard_nonvirtualized')
self.emit(self.var_position(op.args[0]))
self.emit(self.get_position(virtualizabledesc))
- self.emit(self.const_position(guard_field))
+ self.emit(self.get_position(guard_field))
# ----------
Modified: pypy/branch/pyjitpl5/pypy/jit/metainterp/history.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/metainterp/history.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/metainterp/history.py Thu Mar 12 15:11:19 2009
@@ -161,6 +161,8 @@
def _getrepr_(self):
return self.value
+ sort_key = getint
+
CONST_FALSE = ConstInt(0)
CONST_TRUE = ConstInt(1)
Modified: pypy/branch/pyjitpl5/pypy/jit/metainterp/optimize.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/metainterp/optimize.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/metainterp/optimize.py Thu Mar 12 15:11:19 2009
@@ -33,11 +33,6 @@
class CancelInefficientLoop(Exception):
pass
-def convert_vdesc(cpu, vdesc):
- if vdesc:
- return [cpu.ofs_from_descr(i) for i in vdesc.virtuals]
- return []
-
class AllocationStorage(object):
def __init__(self):
# allocations: list of vtables to allocate
@@ -188,7 +183,6 @@
return NotSpecNode()
return FixedClassSpecNode(known_class)
if not other.escaped:
-
fields = []
if self is other:
d = self.origfields.copy()
@@ -196,7 +190,7 @@
else:
d = other.curfields
lst = d.keys()
- sort_integers(lst)
+ sort_descrs(lst)
for ofs in lst:
node = d[ofs]
if ofs not in self.origfields:
@@ -213,7 +207,7 @@
if not other.virtualized and self.expanded_fields:
fields = []
lst = self.expanded_fields.keys()
- sort_integers(lst)
+ sort_descrs(lst)
for ofs in lst:
specnode = SpecNodeWithBox(self.origfields[ofs].source)
fields.append((ofs, specnode))
@@ -225,7 +219,7 @@
d = self.origfields.copy()
d.update(other.curfields)
offsets = d.keys()
- sort_integers(offsets)
+ sort_descrs(offsets)
fields = []
for ofs in offsets:
if ofs in self.origfields and ofs in other.curfields:
@@ -259,33 +253,34 @@
return None
# This does "Perfect specialization" as per doc/jitpl5.txt.
- perfect_specializer = PerfectSpecializer(loop, options)
- perfect_specializer.find_nodes(cpu)
+ perfect_specializer = PerfectSpecializer(loop, options, cpu)
+ perfect_specializer.find_nodes()
perfect_specializer.intersect_input_and_output()
for old_loop in old_loops:
if perfect_specializer.match_exactly(old_loop):
return old_loop
- perfect_specializer.optimize_loop(cpu)
+ perfect_specializer.optimize_loop()
return None
def optimize_bridge(options, old_loops, bridge, cpu=None):
if not options.specialize: # for tests only
return old_loops[0]
- perfect_specializer = PerfectSpecializer(bridge, options)
- perfect_specializer.find_nodes(cpu)
+ perfect_specializer = PerfectSpecializer(bridge, options, cpu)
+ perfect_specializer.find_nodes()
for old_loop in old_loops:
if perfect_specializer.match(old_loop.operations):
perfect_specializer.adapt_for_match(old_loop.operations)
- perfect_specializer.optimize_loop(cpu)
+ perfect_specializer.optimize_loop()
return old_loop
return None # no loop matches
class PerfectSpecializer(object):
- def __init__(self, loop, options=Options()):
+ def __init__(self, loop, options=Options(), cpu=None):
self.loop = loop
self.options = options
+ self.cpu = cpu
self.nodes = {}
self.dependency_graph = []
@@ -304,10 +299,12 @@
return self.nodes[box].source
def find_nodes_setfield(self, instnode, ofs, fieldnode):
+ assert isinstance(ofs, AbstractValue)
instnode.curfields[ofs] = fieldnode
self.dependency_graph.append((instnode, fieldnode))
def find_nodes_getfield(self, instnode, field, box):
+ assert isinstance(field, AbstractValue)
if field in instnode.curfields:
fieldnode = instnode.curfields[field]
elif field in instnode.origfields:
@@ -328,23 +325,14 @@
instnode.cls = InstanceNode(FixedList(op.descr))
fieldbox = op.args[1]
if self.getnode(fieldbox).const:
- item = self.getsource(fieldbox).getint()
- assert item >= 0 # XXX
+ item = self.getsource(fieldbox)
self.find_nodes_getfield(instnode, item, op.result)
else:
instnode.escaped = True
self.nodes[op.result] = InstanceNode(op.result,
escaped=True)
-
-## def find_nodes_insert(self, instnode, field, fieldnode):
-## for ofs, node in instnode.curfields.items():
-## if ofs >= field:
-## instnode.curfields[ofs + 1] = node
-## instnode.curfields[field] = fieldnode
-## instnode.cursize += 1
-## self.dependency_graph.append((instnode, fieldnode))
- def find_nodes(self, cpu):
+ def find_nodes(self):
# Steps (1) and (2)
self.first_escaping_op = True
# only catch can have consts
@@ -375,32 +363,6 @@
else:
instnode.escaped = True
continue
-## elif opname == 'newlist':
-## box = op.results[0]
-## instnode = InstanceNode(box, escaped=False)
-## self.nodes[box] = instnode
-## self.first_escaping_op = False
-## if (isinstance(op.args[1], ConstInt) or
-## self.nodes[op.args[1]].const):
-## size = self.getsource(op.args[1]).getint()
-## instnode.cursize = size
-## instnode.origsize = size
-## # XXX following guard_builtin will set the
-## # correct class, otherwise it's a mess
-## continue
-## elif opname == 'guard_builtin':
-## instnode = self.nodes[op.args[0]]
-## # all builtins have equal classes
-## instnode.cls = InstanceNode(op.args[1])
-## continue
-## elif opname == 'guard_len':
-## instnode = self.nodes[op.args[0]]
-## if instnode.cursize == -1:
-## instnode = self.nodes[op.args[0]]
-## size = op.args[1].getint()
-## instnode.cursize = size
-## instnode.origsize = size
-## continue
elif opnum == rop.GETARRAYITEM_GC:
self.find_nodes_getarrayitem(op)
continue
@@ -415,8 +377,7 @@
instnode.cls = InstanceNode(FixedList(op.descr))
fieldbox = op.args[1]
if self.getnode(fieldbox).const:
- item = self.getsource(fieldbox).getint()
- assert item >= 0 # XXX
+ item = self.getsource(fieldbox)
self.find_nodes_setfield(instnode, item,
self.getnode(op.args[2]))
else:
@@ -426,98 +387,23 @@
continue
elif opnum == rop.SETFIELD_GC:
instnode = self.getnode(op.args[0])
- field = cpu.ofs_from_descr(op.descr)
+ field = op.descr
self.find_nodes_setfield(instnode, field,
self.getnode(op.args[1]))
continue
elif opnum == rop.GETFIELD_GC:
instnode = self.getnode(op.args[0])
- field = cpu.ofs_from_descr(op.descr)
+ field = op.descr
box = op.result
self.find_nodes_getfield(instnode, field, box)
continue
elif opnum == rop.GETFIELD_GC_PURE:
instnode = self.getnode(op.args[0])
- field = cpu.ofs_from_descr(op.descr)
+ field = op.descr
if not instnode.const:
box = op.result
self.find_nodes_getfield(instnode, field, box)
continue
-## elif opname == 'getitem':
-## instnode = self.getnode(op.args[1])
-## fieldbox = op.args[2]
-## if (isinstance(fieldbox, ConstInt) or
-## self.nodes[op.args[2]].const):
-## field = self.getsource(fieldbox).getint()
-## if field < 0:
-## field = instnode.cursize + field
-## box = op.results[0]
-## self.find_nodes_getfield(instnode, field, box)
-## continue
-## else:
-## instnode.escaped = True
-## self.nodes[op.results[0]] = InstanceNode(op.results[0],
-## escaped=True)
-## continue
-## elif opname == 'append':
-## instnode = self.getnode(op.args[1])
-## assert isinstance(instnode.cls.source, ListDescr)
-## if instnode.cursize != -1:
-## field = instnode.cursize
-## instnode.cursize += 1
-## self.find_nodes_setfield(instnode, field,
-## self.getnode(op.args[2]))
-## continue
-## elif opname == 'insert':
-## instnode = self.getnode(op.args[1])
-## assert isinstance(instnode.cls.source, ListDescr)
-## if instnode.cursize != -1:
-## fieldbox = self.getsource(op.args[2])
-## assert isinstance(fieldbox, Const) or fieldbox.const
-## field = fieldbox.getint()
-## if field < 0:
-## field = instnode.cursize + field
-## self.find_nodes_insert(instnode, field,
-## self.getnode(op.args[3]))
-## continue
-## elif opname == 'pop':
-## instnode = self.getnode(op.args[1])
-## assert isinstance(instnode.cls.source, ListDescr)
-## if instnode.cursize != -1:
-## instnode.cursize -= 1
-## field = instnode.cursize
-## self.find_nodes_getfield(instnode, field, op.results[0])
-## if field in instnode.curfields:
-## del instnode.curfields[field]
-## continue
-## self.nodes[op.results[0]] = InstanceNode(op.results[0],
-## escaped=True)
-## self.dependency_graph.append((instnode,
-## self.nodes[op.results[0]]))
-## continue
-## elif opname == 'len' or opname == 'listnonzero':
-## instnode = self.getnode(op.args[1])
-## if not instnode.escaped:
-## assert instnode.cursize != -1
-## lgtbox = op.results[0].constbox()
-## self.nodes[op.results[0]] = InstanceNode(lgtbox, const=True)
-## continue
-## elif opname == 'setitem':
-## instnode = self.getnode(op.args[1])
-## fieldbox = op.args[2]
-## if (isinstance(fieldbox, ConstInt)
-## or self.nodes[op.args[2]].const):
-## field = self.getsource(fieldbox).getint()
-## if field < 0:
-## field = instnode.cursize + field
-## assert field < instnode.cursize
-## self.find_nodes_setfield(instnode, field,
-## self.getnode(op.args[3]))
-## continue
-## else:
-## self.dependency_graph.append((instnode,
-## self.getnode(op.args[3])))
-## instnode.escaped = True
elif opnum == rop.GUARD_CLASS:
instnode = self.getnode(op.args[0])
if instnode.cls is None:
@@ -537,7 +423,8 @@
instnode.virtualized = True
if instnode.cls is None:
instnode.cls = InstanceNode(op.args[1], const=True)
- instnode.vdesc = convert_vdesc(cpu, op.vdesc)
+ if op.vdesc:
+ instnode.vdesc = op.vdesc.virtuals
continue
elif op.is_always_pure():
for arg in op.args:
@@ -598,17 +485,17 @@
specnodes.append(enternode.intersect(leavenode, self.nodes))
self.specnodes = specnodes
- def expanded_version_of(self, boxlist, oplist, cpu):
+ def expanded_version_of(self, boxlist, oplist):
# oplist is None means at the start
newboxlist = []
assert len(boxlist) == len(self.specnodes)
for i in range(len(boxlist)):
box = boxlist[i]
specnode = self.specnodes[i]
- specnode.expand_boxlist(self.nodes[box], newboxlist, oplist, cpu)
+ specnode.expand_boxlist(self.nodes[box], newboxlist, oplist)
return newboxlist
- def optimize_guard(self, op, cpu):
+ def optimize_guard(self, op):
liveboxes = []
storage = AllocationStorage()
memo = {}
@@ -617,7 +504,7 @@
op = op.clone()
for box in old_boxes:
indices.append(storage.deal_with_box(box, self.nodes,
- liveboxes, memo, cpu))
+ liveboxes, memo, self.cpu))
rev_boxes = {}
for i in range(len(liveboxes)):
box = liveboxes[i]
@@ -668,6 +555,7 @@
return op
def optimize_getfield(self, instnode, ofs, box):
+ assert isinstance(ofs, AbstractValue)
if instnode.virtual or instnode.virtualized:
## if ofs < 0:
## ofs = instnode.cursize + ofs
@@ -681,6 +569,7 @@
return False
def optimize_setfield(self, instnode, ofs, valuenode, valuebox):
+ assert isinstance(ofs, AbstractValue)
if instnode.virtual or instnode.virtualized:
## if ofs < 0:
## ofs = instnode.cursize + ofs
@@ -699,11 +588,9 @@
## instnode.curfields[field] = valuenode
## instnode.cursize += 1
- def optimize_loop(self, cpu):
- #self.ready_results = {}
+ def optimize_loop(self):
newoperations = []
exception_might_have_happened = False
- #ops_so_far = []
mp = self.loop.operations[0]
if mp.opnum == rop.MERGE_POINT:
assert len(mp.args) == len(self.specnodes)
@@ -717,61 +604,36 @@
assert not self.nodes[box].virtual
for op in self.loop.operations:
- #ops_so_far.append(op)
-
- #if newoperations and newoperations[-1].results:
- # self.ready_results[newoperations[-1].results[0]] = None
opnum = op.opnum
if opnum == rop.MERGE_POINT:
- args = self.expanded_version_of(op.args, None, cpu)
+ args = self.expanded_version_of(op.args, None)
op = ResOperation(rop.MERGE_POINT, args, None)
newoperations.append(op)
- #for arg in op.args:
- # self.ready_results[arg] = None
continue
- #elif opname == 'catch':
- # for arg in op.args:
- # self.ready_results[arg] = None
elif opnum == rop.JUMP:
- args = self.expanded_version_of(op.args, newoperations, cpu)
+ args = self.expanded_version_of(op.args, newoperations)
for arg in args:
if arg in self.nodes:
assert not self.nodes[arg].virtual
- self.cleanup_field_caches(newoperations, cpu)
+ self.cleanup_field_caches(newoperations)
op = ResOperation(rop.JUMP, args, None)
newoperations.append(op)
continue
-## elif opnum == rop.ARRAYLEN_GC:
-## instnode = self.nodes[op.args[0]]
-
-## elif opname == 'guard_builtin':
-## instnode = self.nodes[op.args[0]]
-## if instnode.cls is None:
-## instnode.cls = InstanceNode(op.args[1])
-## continue
-## elif opname == 'guard_len':
-## # it should be completely gone, because if it escapes
-## # we don't virtualize it anymore
-## instnode = self.nodes[op.args[0]]
-## if not instnode.escaped and instnode.cursize == -1:
-## instnode = self.nodes[op.args[0]]
-## instnode.cursize = op.args[1].getint()
-## continue
elif opnum == rop.GUARD_NO_EXCEPTION:
if not exception_might_have_happened:
continue
exception_might_have_happened = False
- newoperations.append(self.optimize_guard(op, cpu))
+ newoperations.append(self.optimize_guard(op))
continue
elif opnum == rop.GUARD_EXCEPTION:
- newoperations.append(self.optimize_guard(op, cpu))
+ newoperations.append(self.optimize_guard(op))
continue
elif (opnum == rop.GUARD_TRUE or
opnum == rop.GUARD_FALSE):
instnode = self.nodes[op.args[0]]
if instnode.const:
continue
- newoperations.append(self.optimize_guard(op, cpu))
+ newoperations.append(self.optimize_guard(op))
continue
elif opnum == rop.GUARD_CLASS:
instnode = self.nodes[op.args[0]]
@@ -779,7 +641,7 @@
assert op.args[1].equals(instnode.cls.source)
continue
instnode.cls = InstanceNode(op.args[1], const=True)
- newoperations.append(self.optimize_guard(op, cpu))
+ newoperations.append(self.optimize_guard(op))
continue
elif opnum == rop.GUARD_VALUE:
instnode = self.nodes[op.args[0]]
@@ -787,50 +649,38 @@
if instnode.const:
continue
instnode.const = True
- newoperations.append(self.optimize_guard(op, cpu))
+ newoperations.append(self.optimize_guard(op))
continue
elif opnum == rop.GUARD_NONVIRTUALIZED:
instnode = self.nodes[op.args[0]]
if instnode.virtualized or instnode.virtual:
continue
- newoperations.append(self.optimize_guard(op, cpu))
+ newoperations.append(self.optimize_guard(op))
continue
elif opnum == rop.GETFIELD_GC:
instnode = self.nodes[op.args[0]]
- ofs = cpu.ofs_from_descr(op.descr)
- if self.optimize_getfield(instnode, ofs, op.result):
+ if self.optimize_getfield(instnode, op.descr, op.result):
continue
# otherwise we need this getfield, but it does not
# invalidate caches
elif opnum == rop.GETFIELD_GC_PURE:
instnode = self.nodes[op.args[0]]
if not instnode.const:
- ofs = cpu.ofs_from_descr(op.descr)
- if self.optimize_getfield(instnode, ofs, op.result):
+ if self.optimize_getfield(instnode, op.descr, op.result):
continue
elif opnum == rop.GETARRAYITEM_GC:
instnode = self.nodes[op.args[0]]
ofsbox = self.getsource(op.args[1])
if isinstance(ofsbox, ConstInt):
- ofs = ofsbox.getint()
- if self.optimize_getfield(instnode, ofs, op.result):
+ if self.optimize_getfield(instnode, ofsbox, op.result):
continue
elif opnum == rop.GETARRAYITEM_GC_PURE:
instnode = self.nodes[op.args[0]]
ofsbox = self.getsource(op.args[1])
if not instnode.const:
if isinstance(ofsbox, ConstInt):
- ofs = ofsbox.getint()
- if self.optimize_getfield(instnode, ofs, op.result):
+ if self.optimize_getfield(instnode, ofsbox, op.result):
continue
-
-## elif opname == 'getitem':
-## instnode = self.nodes[op.args[1]]
-## ofsbox = self.getsource(op.args[2])
-## if isinstance(ofsbox, ConstInt):
-## ofs = ofsbox.getint()
-## if self.optimize_getfield(instnode, ofs, op.results[0]):
-## continue
elif opnum == rop.NEW_WITH_VTABLE:
# self.nodes[op.results[0]] keep the value from Steps (1,2)
instnode = self.nodes[op.result]
@@ -844,50 +694,10 @@
instnode.virtual = True
instnode.cursize = op.args[0].getint()
continue
-## elif opname == 'newlist':
-## instnode = self.nodes[op.results[0]]
-## assert isinstance(instnode.cls.source, ListDescr)
-## if not instnode.escaped:
-## instnode.virtual = True
-## valuesource = self.getsource(op.args[2])
-## instnode.cursize = op.args[1].getint()
-## curfields = {}
-## for i in range(instnode.cursize):
-## curfields[i] = InstanceNode(valuesource,
-## const=True)
-## instnode.curfields = curfields
-## continue
-## elif opname == 'append':
-## instnode = self.nodes[op.args[1]]
-## valuenode = self.getnode(op.args[2])
-## if not instnode.escaped:
-## ofs = instnode.cursize
-## instnode.cursize += 1
-## self.optimize_setfield(instnode, ofs, valuenode, op.args[2])
-## continue
-## elif opname == 'insert':
-## instnode = self.nodes[op.args[1]]
-## if not instnode.escaped:
-## ofs = self.getsource(op.args[2]).getint()
-## valuenode = self.nodes[op.args[3]]
-## self.optimize_insert(instnode, ofs, valuenode, op.args[3])
-## continue
-## elif opname == 'pop':
-## instnode = self.nodes[op.args[1]]
-## if not instnode.escaped:
-## instnode.cursize -= 1
-## ofs = instnode.cursize
-## if self.optimize_getfield(instnode, ofs, op.results[0]):
-## del instnode.curfields[ofs]
-## continue
-## elif opname == 'len' or opname == 'listnonzero':
-## instnode = self.nodes[op.args[1]]
-## if instnode.virtual:
-## continue
elif opnum == rop.SETFIELD_GC:
instnode = self.nodes[op.args[0]]
valuenode = self.nodes[op.args[1]]
- ofs = cpu.ofs_from_descr(op.descr)
+ ofs = op.descr
self.optimize_setfield(instnode, ofs, valuenode, op.args[1])
continue
elif opnum == rop.SETARRAYITEM_GC:
@@ -896,18 +706,10 @@
instnode.cls = InstanceNode(FixedList(op.descr))
ofsbox = self.getsource(op.args[1])
if isinstance(ofsbox, ConstInt):
- ofs = ofsbox.getint()
valuenode = self.getnode(op.args[2])
- self.optimize_setfield(instnode, ofs, valuenode, op.args[2])
+ self.optimize_setfield(instnode, ofsbox, valuenode,
+ op.args[2])
continue
-## elif opname == 'setitem':
-## instnode = self.nodes[op.args[1]]
-## valuenode = self.getnode(op.args[3])
-## ofsbox = self.getsource(op.args[2])
-## if isinstance(ofsbox, ConstInt):
-## ofs = ofsbox.getint()
-## self.optimize_setfield(instnode, ofs, valuenode, op.args[3])
-## continue
elif (opnum == rop.OOISNULL or
opnum == rop.OONONNULL):
instnode = self.getnode(op.args[0])
@@ -954,7 +756,7 @@
opnum != rop.SETARRAYITEM_GC):
# the setfield operations do not clean up caches, although
# they have side effects
- self.cleanup_field_caches(newoperations, cpu)
+ self.cleanup_field_caches(newoperations)
if op.can_raise():
exception_might_have_happened = True
box = op.result
@@ -966,7 +768,7 @@
newoperations[0].specnodes = self.specnodes
self.loop.operations = newoperations
- def cleanup_field_caches(self, newoperations, cpu):
+ def cleanup_field_caches(self, newoperations):
# we need to invalidate everything
for node in self.nodes.values():
for ofs, valuenode in node.dirtyfields.items():
@@ -976,12 +778,11 @@
ld = node.cls.source
assert isinstance(ld, FixedList)
newoperations.append(ResOperation(rop.SETARRAYITEM_GC,
- [node.source, ConstInt(ofs), valuenode.source],
+ [node.source, ofs, valuenode.source],
None, ld.arraydescr))
else:
- descr = cpu.repack_descr(ofs)
newoperations.append(ResOperation(rop.SETFIELD_GC,
- [node.source, valuenode.source], None, descr))
+ [node.source, valuenode.source], None, ofs))
node.dirtyfields = {}
node.cleanfields = {}
@@ -1054,16 +855,15 @@
box = box_from_index(allocated_boxes, allocated_lists,
boxes_from_frame,
index_in_alloc)
- descr = metainterp.cpu.repack_descr(ofs)
metainterp.execute_and_record(rop.SETFIELD_GC,
- [box, fieldbox], descr)
+ [box, fieldbox], ofs)
for index_in_alloc, ad, ofs, index_in_arglist in storage.setitems:
itembox = box_from_index(allocated_boxes, allocated_lists,
boxes_from_frame, index_in_arglist)
box = box_from_index(allocated_boxes, allocated_lists,
boxes_from_frame, index_in_alloc)
metainterp.execute_and_record(rop.SETARRAYITEM_GC,
- [box, ConstInt(ofs), itembox], ad)
+ [box, ofs, itembox], ad)
## if storage.setitems:
## #history.execute_and_record('guard_no_exception', [], 'void', False)
## # XXX this needs to check for exceptions somehow
@@ -1082,14 +882,15 @@
def partition(array, left, right):
- pivot = array[right]
+ last_item = array[right]
+ pivot = last_item.sort_key()
storeindex = left
for i in range(left, right):
- if array[i] <= pivot:
+ if array[i].sort_key() <= pivot:
array[i], array[storeindex] = array[storeindex], array[i]
storeindex += 1
# Move pivot to its final place
- array[storeindex], array[right] = pivot, array[storeindex]
+ array[storeindex], array[right] = last_item, array[storeindex]
return storeindex
def quicksort(array, left, right):
@@ -1099,5 +900,5 @@
quicksort(array, left, pivotnewindex - 1)
quicksort(array, pivotnewindex + 1, right)
-def sort_integers(lst):
+def sort_descrs(lst):
quicksort(lst, 0, len(lst)-1)
Modified: pypy/branch/pyjitpl5/pypy/jit/metainterp/pyjitpl.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/metainterp/pyjitpl.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/metainterp/pyjitpl.py Thu Mar 12 15:11:19 2009
@@ -512,14 +512,14 @@
## 'len', [builtin.len_func, box], 'int')
## self.generate_guard(pc, "guard_len", box, [intbox])
- @arguments("orgpc", "box", "virtualizabledesc", "int")
+ @arguments("orgpc", "box", "virtualizabledesc", "constbox")
def opimpl_guard_nonvirtualized(self, pc, box, vdesc, guard_field):
clsbox = self.cls_of_box(box)
op = self.generate_guard(pc, rop.GUARD_NONVIRTUALIZED, box,
[clsbox])
if op:
op.vdesc = vdesc
- op.descr = guard_field
+ op.setdescr(guard_field)
@arguments("box")
def opimpl_keepalive(self, box):
Modified: pypy/branch/pyjitpl5/pypy/jit/metainterp/resoperation.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/metainterp/resoperation.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/metainterp/resoperation.py Thu Mar 12 15:11:19 2009
@@ -18,15 +18,20 @@
vdesc = None
def __init__(self, opnum, args, result, descr=None):
- from pypy.jit.metainterp.history import AbstractValue
assert isinstance(opnum, int)
self.opnum = opnum
self.args = list(args)
assert not isinstance(result, list)
self.result = result
+ self.setdescr(descr)
+
+ def setdescr(self, descr):
# for 'call', 'new', 'getfield_gc'...: the descr is a number provided
- # by the backend holding details about the type of the operation
+ # by the backend holding details about the type of the operation --
+ # actually an instance of a class, typically Descr, that inherits
+ # from AbstractValue
if descr is not None:
+ from pypy.jit.metainterp.history import AbstractValue
assert isinstance(descr, AbstractValue)
self.descr = descr
Modified: pypy/branch/pyjitpl5/pypy/jit/metainterp/specnode.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/metainterp/specnode.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/metainterp/specnode.py Thu Mar 12 15:11:19 2009
@@ -1,10 +1,9 @@
from pypy.jit.metainterp.resoperation import ResOperation, rop
-from pypy.jit.metainterp.history import ConstInt
from pypy.jit.metainterp import executor
class SpecNode(object):
- def expand_boxlist(self, instnode, newboxlist, start, cpu):
+ def expand_boxlist(self, instnode, newboxlist, start):
newboxlist.append(instnode.source)
def extract_runtime_data(self, cpu, valuebox, resultlist):
@@ -119,16 +118,15 @@
return False
return True
- def expand_boxlist(self, instnode, newboxlist, start, cpu):
+ def expand_boxlist(self, instnode, newboxlist, start):
for ofs, subspecnode in self.fields:
subinstnode = instnode.curfields[ofs] # should really be there
- subspecnode.expand_boxlist(subinstnode, newboxlist, start, cpu)
+ subspecnode.expand_boxlist(subinstnode, newboxlist, start)
def extract_runtime_data(self, cpu, valuebox, resultlist):
for ofs, subspecnode in self.fields:
- descr = cpu.repack_descr(ofs)
fieldbox = executor.execute(cpu, rop.GETFIELD_GC,
- [valuebox], descr)
+ [valuebox], ofs)
subspecnode.extract_runtime_data(cpu, fieldbox, resultlist)
def adapt_to(self, instnode):
@@ -137,10 +135,9 @@
class VirtualizedSpecNode(SpecNodeWithFields):
- def expand_boxlist(self, instnode, newboxlist, start, cpu):
+ def expand_boxlist(self, instnode, newboxlist, start):
newboxlist.append(instnode.source)
- SpecNodeWithFields.expand_boxlist(self, instnode, newboxlist, start,
- cpu)
+ SpecNodeWithFields.expand_boxlist(self, instnode, newboxlist, start)
def extract_runtime_data(self, cpu, valuebox, resultlist):
resultlist.append(valuebox)
@@ -152,7 +149,7 @@
class DelayedSpecNode(VirtualizedSpecNode):
- def expand_boxlist(self, instnode, newboxlist, oplist, cpu):
+ def expand_boxlist(self, instnode, newboxlist, oplist):
newboxlist.append(instnode.source)
for ofs, subspecnode in self.fields:
assert isinstance(subspecnode, SpecNodeWithBox)
@@ -164,15 +161,14 @@
newboxlist.append(instnode.cleanfields[ofs].source)
else:
box = subspecnode.box.clonebox()
- descr = cpu.repack_descr(ofs)
oplist.append(ResOperation(rop.GETFIELD_GC,
- [instnode.source], box, descr))
+ [instnode.source], box, ofs))
newboxlist.append(box)
class DelayedFixedListSpecNode(DelayedSpecNode):
- def expand_boxlist(self, instnode, newboxlist, oplist, cpu):
- from pypy.jit.metainterp.history import ResOperation, ConstInt
+ def expand_boxlist(self, instnode, newboxlist, oplist):
+ from pypy.jit.metainterp.history import ResOperation
from pypy.jit.metainterp.resoperation import rop
from pypy.jit.metainterp.optimize import FixedList
@@ -191,7 +187,7 @@
else:
box = subspecnode.box.clonebox()
oplist.append(ResOperation(rop.GETARRAYITEM_GC,
- [instnode.source, ConstInt(ofs)], box, arraydescr))
+ [instnode.source, ofs], box, arraydescr))
newboxlist.append(box)
def extract_runtime_data(self, cpu, valuebox, resultlist):
@@ -204,7 +200,7 @@
arraydescr = cls.arraydescr
for ofs, subspecnode in self.fields:
fieldbox = executor.execute(cpu, rop.GETARRAYITEM_GC,
- [valuebox, ConstInt(ofs)], arraydescr)
+ [valuebox, ofs], arraydescr)
subspecnode.extract_runtime_data(cpu, fieldbox, resultlist)
class VirtualizableSpecNode(VirtualizedSpecNode):
@@ -244,7 +240,6 @@
def extract_runtime_data(self, cpu, valuebox, resultlist):
from pypy.jit.metainterp.resoperation import rop
- from pypy.jit.metainterp.history import ConstInt
from pypy.jit.metainterp.optimize import FixedList
for ofs, subspecnode in self.fields:
@@ -252,5 +247,5 @@
assert isinstance(cls, FixedList)
arraydescr = cls.arraydescr
fieldbox = executor.execute(cpu, rop.GETARRAYITEM_GC,
- [valuebox, ConstInt(ofs)], arraydescr)
+ [valuebox, ofs], arraydescr)
subspecnode.extract_runtime_data(cpu, fieldbox, resultlist)
Modified: pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_list_optimize.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_list_optimize.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_list_optimize.py Thu Mar 12 15:11:19 2009
@@ -33,7 +33,7 @@
spec.find_nodes()
node = spec.nodes[A.l]
assert isinstance(node.cls.source, FixedList)
- assert node.expanded_fields.keys() == [0]
+ assert node.expanded_fields.keys() == [ConstInt(0)]
def test_A_intersect():
spec = PerfectSpecializer(Loop(A.ops))
@@ -45,7 +45,7 @@
spec = PerfectSpecializer(Loop(A.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation(rop.MERGE_POINT, [A.l, A.e0], None),
ResOperation(rop.SETARRAYITEM_GC, [A.l, ConstInt(0), A.e0], None, A.ad),
@@ -74,7 +74,7 @@
spec = PerfectSpecializer(Loop(B.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation(rop.MERGE_POINT, [B.l, B.e0], None),
ResOperation(rop.INT_ADD, [B.e0, ConstInt(1)], B.e1),
Modified: pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_optimize.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_optimize.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_optimize.py Thu Mar 12 15:11:19 2009
@@ -133,7 +133,7 @@
spec = PerfectSpecializer(Loop(A.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [A.sum, A.v], None),
ResOperation('int_sub', [A.v, ConstInt(1)], A.v2),
@@ -181,7 +181,7 @@
spec = PerfectSpecializer(Loop(B.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [B.sum, B.n1], None),
# guard_class is gone
@@ -236,7 +236,7 @@
spec = PerfectSpecializer(Loop(C.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [C.sum, C.n1], None),
# guard_class is gone
@@ -295,7 +295,7 @@
spec = PerfectSpecializer(Loop(E.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [E.sum, E.v], None),
# guard_class is gone
@@ -327,7 +327,7 @@
spec = PerfectSpecializer(Loop(E.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
guard_op = spec.loop.operations[-2]
v_sum_b = BoxInt(13)
v_v_b = BoxInt(14)
@@ -385,7 +385,7 @@
spec.find_nodes()
spec.intersect_input_and_output()
assert spec.nodes[F.n3].escaped
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [F.sum, F.v, F.n3], None),
ResOperation('int_sub', [F.v, ConstInt(1)], F.v2),
@@ -417,7 +417,7 @@
spec = PerfectSpecializer(Loop(F2.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, F2.ops)
# ____________________________________________________________
@@ -447,7 +447,7 @@
spec = PerfectSpecializer(Loop(G.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [G.sum, G.v], None),
# guard_class is gone
@@ -577,7 +577,7 @@
spec = PerfectSpecializer(Loop(K0.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
v4 = spec.loop.operations[-1].args[-1]
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [K0.sum, K0.n1, K0.v], None),
@@ -611,7 +611,7 @@
spec = PerfectSpecializer(Loop(K1.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
v4 = spec.loop.operations[-1].args[-1]
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [K1.sum, K1.n1, K1.v], None),
@@ -644,7 +644,7 @@
spec = PerfectSpecializer(Loop(K.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [K.sum, K.n1, K.v], None),
ResOperation('int_sub', [K.v, ConstInt(1)], K.v2),
@@ -675,7 +675,7 @@
spec = PerfectSpecializer(Loop(L.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [L.sum, L.n1, L.v], None),
ResOperation('int_sub', [L.v, ConstInt(1)], L.v2),
@@ -706,7 +706,7 @@
spec = PerfectSpecializer(Loop(M.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
v4 = spec.loop.operations[-1].args[-1]
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [M.sum, M.n1, M.v], None),
@@ -737,7 +737,7 @@
spec = PerfectSpecializer(Loop(N.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
v4 = spec.loop.operations[-1].args[-1]
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [N.sum, N.n1, N.v], None),
@@ -766,7 +766,7 @@
spec = PerfectSpecializer(Loop(O1.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [], None),
ResOperation('escape', [], O1.n1),
@@ -796,7 +796,7 @@
spec = PerfectSpecializer(Loop(O2.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [], None),
ResOperation('escape', [], O2.n1),
@@ -828,7 +828,7 @@
spec = PerfectSpecializer(Loop(O3.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [], None),
ResOperation('escape', [], O3.n1),
Modified: pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_vable_optimize.py
==============================================================================
--- pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_vable_optimize.py (original)
+++ pypy/branch/pyjitpl5/pypy/jit/metainterp/test/test_vable_optimize.py Thu Mar 12 15:11:19 2009
@@ -118,7 +118,7 @@
spec = PerfectSpecializer(Loop(A.ops))
spec.find_nodes()
spec.intersect_input_and_output()
- spec.optimize_loop(None)
+ spec.optimize_loop()
equaloplists(spec.loop.operations, [
ResOperation('merge_point', [A.sum, A.fr, A.v], None),
ResOperation('int_sub', [A.v, ConstInt(1)], A.v2),
More information about the Pypy-commit
mailing list