[pypy-svn] r78570 - pypy/branch/arm-backend/pypy/jit/backend/arm

david at codespeak.net
Sat Oct 30 23:02:31 CEST 2010


Author: david
Date: Sat Oct 30 23:02:28 2010
New Revision: 78570

Modified:
   pypy/branch/arm-backend/pypy/jit/backend/arm/codebuilder.py
   pypy/branch/arm-backend/pypy/jit/backend/arm/conditions.py
   pypy/branch/arm-backend/pypy/jit/backend/arm/opassembler.py
Log:
Implement int and uint comparison operations
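
Each comparison is compiled as a CMP followed by two conditional moves that
materialize the boolean result: MOV res, #1 under the comparison's condition
and MOV res, #0 under its opposite (looked up via get_opposite_of in
conditions.py). uint_lt and uint_ge reuse the HI/LS encodings with swapped
operands (inverse=True). A rough, illustrative sketch of the pattern, not the
backend code itself (the mc builder object and register numbers are
stand-ins):

    # sketch only: roughly what gen_emit_cmp_op produces for e.g. int_lt
    LT, GE = 0xB, 0xA                  # standard ARM condition codes

    def emit_int_lt(mc, res, reg0, reg1):
        mc.CMP_rr(reg0, reg1)          # set flags from reg0 - reg1
        mc.MOV_ri(res, 1, cond=LT)     # res = 1 if reg0 < reg1 (signed)
        mc.MOV_ri(res, 0, cond=GE)     # res = 0 otherwise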

Modified: pypy/branch/arm-backend/pypy/jit/backend/arm/codebuilder.py
==============================================================================
--- pypy/branch/arm-backend/pypy/jit/backend/arm/codebuilder.py	(original)
+++ pypy/branch/arm-backend/pypy/jit/backend/arm/codebuilder.py	Sat Oct 30 23:02:28 2010
@@ -16,12 +16,12 @@
         """Generates a call to a helper function, takes its
         arguments in r0 and r1, result is placed in r0"""
         self.ensure_can_fit(self.size_of_gen_load_int*2+3*WORD)
-        self.PUSH(range(2, 12), cond=cond)
+        self.PUSH(range(2, 4), cond=cond)
         addr = rffi.cast(lltype.Signed, llhelper(signature, function))
         self.gen_load_int(reg.r2.value, addr, cond=cond)
         self.gen_load_int(reg.lr.value, self.curraddr()+self.size_of_gen_load_int+WORD, cond=cond)
         self.MOV_rr(reg.pc.value, reg.r2.value, cond=cond)
-        self.LDM(reg.sp.value, range(2, 12), w=1, cond=cond) # XXX Replace with POP instr. someday
+        self.LDM(reg.sp.value, range(2, 4), w=1, cond=cond) # XXX Replace with POP instr. someday
     return f
 
 class AbstractARMv7Builder(object):
@@ -49,15 +49,6 @@
         instr = self._encode_reg_list(instr, regs)
         self.write32(instr)
 
-    def CMP(self, rn, imm, cond=cond.AL):
-        if 0 <= imm <= 255:
-            self.write32(cond << 28
-                        | 0x35 << 20
-                        | (rn & 0xFF) <<  16
-                        | (imm & 0xFFF))
-        else:
-            raise NotImplentedError
-
     def BKPT(self, cond=cond.AL):
         self.write32(cond << 28 | 0x1200070)
 

Modified: pypy/branch/arm-backend/pypy/jit/backend/arm/conditions.py
==============================================================================
--- pypy/branch/arm-backend/pypy/jit/backend/arm/conditions.py	(original)
+++ pypy/branch/arm-backend/pypy/jit/backend/arm/conditions.py	Sat Oct 30 23:02:28 2010
@@ -13,3 +13,8 @@
 GT = 0xC
 LE = 0xD
 AL = 0xE
+
+opposites = [NE, EQ, CC, CS, PL, MI, VC, VS, LS, HI, LT, GE, LE, GT, AL]
+def get_opposite_of(operation):
+    return opposites[operation]
+

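The opposites table added to conditions.py pairs each condition code with its
logical inverse, indexed by the code itself: EQ<->NE, CS<->CC, MI<->PL,
VS<->VC, HI<->LS, GE<->LT, GT<->LE, with AL mapping to itself. A small
illustrative check (assuming conditions is imported as c, as opassembler.py
does):

    from pypy.jit.backend.arm import conditions as c

    assert c.get_opposite_of(c.EQ) == c.NE
    assert c.get_opposite_of(c.GT) == c.LE
    assert c.get_opposite_of(c.AL) == c.AL
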
Modified: pypy/branch/arm-backend/pypy/jit/backend/arm/opassembler.py
==============================================================================
--- pypy/branch/arm-backend/pypy/jit/backend/arm/opassembler.py	(original)
+++ pypy/branch/arm-backend/pypy/jit/backend/arm/opassembler.py	Sat Oct 30 23:02:28 2010
@@ -52,6 +52,32 @@
         return fcond
     return f
 
+def gen_emit_cmp_op(condition, inverse=False):
+    def f(self, op, regalloc, fcond):
+        assert fcond == c.AL
+        if not inverse:
+            arg0 = op.getarg(0)
+            arg1 = op.getarg(1)
+        else:
+            arg0 = op.getarg(1)
+            arg1 = op.getarg(0)
+        res = regalloc.try_allocate_reg(op.result)
+        # XXX consider swapping arguments if arg0 is const
+        if self._check_imm_arg(arg1) and not isinstance(arg0, ConstInt):
+            reg = regalloc.try_allocate_reg(arg0)
+            self.mc.CMP_ri(reg.value, imm=arg1.getint(), cond=fcond)
+        else:
+            reg = self._put_in_reg(arg0, regalloc)
+            reg2 = self._put_in_reg(arg1, regalloc)
+            self.mc.CMP_rr(reg.value, reg2.value)
+            regalloc.possibly_free_var(reg2)
+
+        inv = c.get_opposite_of(condition)
+        self.mc.MOV_ri(res.value, 1, cond=condition)
+        self.mc.MOV_ri(res.value, 0, cond=inv)
+        return condition
+    return f
+
 class IntOpAsslember(object):
     _mixin_ = True
 
@@ -137,8 +163,21 @@
     emit_op_int_rshift = gen_emit_op_ri('ASR', imm_size=0x1F, commutative=False)
     emit_op_uint_rshift = gen_emit_op_ri('LSR', imm_size=0x1F, commutative=False)
 
+    emit_op_int_lt = gen_emit_cmp_op(c.LT)
+    emit_op_int_le = gen_emit_cmp_op(c.LE)
+    emit_op_int_eq = gen_emit_cmp_op(c.EQ)
+    emit_op_int_ne = gen_emit_cmp_op(c.NE)
+    emit_op_int_gt = gen_emit_cmp_op(c.GT)
+    emit_op_int_ge = gen_emit_cmp_op(c.GE)
 
-    def _check_imm_arg(self, arg, size):
+    emit_op_uint_le = gen_emit_cmp_op(c.LS)
+    emit_op_uint_gt = gen_emit_cmp_op(c.HI)
+
+    emit_op_uint_lt = gen_emit_cmp_op(c.HI, inverse=True)
+    emit_op_uint_ge = gen_emit_cmp_op(c.LS, inverse=True)
+
+
+    def _check_imm_arg(self, arg, size=0xFF):
         #XXX check ranges for different operations
         return isinstance(arg, ConstInt) and arg.getint() <= size and arg.getint() > 0
 
@@ -155,8 +194,10 @@
         descr._arm_guard_cond = fcond
 
     def emit_op_guard_true(self, op, regalloc, fcond):
-        assert fcond == c.GT
-        self._emit_guard(op, regalloc, fcond)
+        assert fcond == c.LE
+        cond = c.get_opposite_of(fcond)
+        assert cond == c.GT
+        self._emit_guard(op, regalloc, cond)
         return c.AL
 
     def emit_op_guard_false(self, op, regalloc, fcond):
@@ -183,17 +224,5 @@
         return fcond
 
     def emit_op_finish(self, op, regalloc, fcond):
-        self._gen_path_to_exit_path(op, op.getarglist(), regalloc, fcond)
+        self._gen_path_to_exit_path(op, op.getarglist(), regalloc, c.AL)
         return fcond
-
-    def emit_op_int_le(self, op, regalloc, fcond):
-        reg = regalloc.try_allocate_reg(op.getarg(0))
-        assert isinstance(op.getarg(1), ConstInt)
-        self.mc.CMP(reg.value, op.getarg(1).getint())
-        return c.GT
-
-    def emit_op_int_eq(self, op, regalloc, fcond):
-        reg = regalloc.try_allocate_reg(op.getarg(0))
-        assert isinstance(op.getarg(1), ConstInt)
-        self.mc.CMP(reg.value, op.getarg(1).getint())
-        return c.EQ


