[pypy-svn] r74721 - in pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86: . tool

jcreigh at codespeak.net
Mon May 24 22:24:35 CEST 2010


Author: jcreigh
Date: Mon May 24 22:24:33 2010
New Revision: 74721

Modified:
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/tool/instruction_encoding.sh
Log:
a little progress on using rx86 from assembler.py

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py	Mon May 24 22:24:33 2010
@@ -16,7 +16,7 @@
                                          xmm0, xmm1, xmm2, xmm3,
                                          xmm4, xmm5, xmm6, xmm7,
                                          RegLoc, StackLoc,
-                                         ImmedLoc, AddressLoc, imm)
+                                         ImmedLoc, AddressLoc, imm, rel32)
 
 from pypy.rlib.objectmodel import we_are_translated, specialize
 from pypy.jit.backend.x86 import rx86, regloc, codebuf
@@ -527,14 +527,15 @@
 
     def _cmpop(cond, rev_cond):
         def genop_cmp(self, op, arglocs, result_loc):
+            # Clear high bits
+            self.mc.MOV_ri(result_loc.value, 0)
             rl = result_loc.lowest8bits()
             if isinstance(op.args[0], Const):
                 self.mc.CMP(arglocs[1], arglocs[0])
-                getattr(self.mc, 'SET' + rev_cond)(rl)
+                self.mc.SET_ir(rx86.Conditions[rev_cond], rl.value)
             else:
                 self.mc.CMP(arglocs[0], arglocs[1])
-                getattr(self.mc, 'SET' + cond)(rl)
-            self.mc.MOVZX(result_loc, rl)
+                self.mc.SET_ir(rx86.Conditions[cond], rl.value)
         return genop_cmp
 
     def _cmpop_float(cond, is_ne=False):
@@ -617,11 +618,12 @@
                     mc.MOV_sr(p, tmp.value)
             p += round_up_to_4(loc.width)
         self._regalloc.reserve_param(p//WORD)
-        mc.CALL_l(x)
+        # x is a location
+        mc.CALL(x)
         self.mark_gc_roots()
         
     def call(self, addr, args, res):
-        self._emit_call(addr, args)
+        self._emit_call(rel32(addr), args)
         assert res is eax
 
     genop_int_neg = _unaryop("NEG")
@@ -940,16 +942,16 @@
         basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR,
                                              self.cpu.translate_support_code)
         assert itemsize == 1
-        self.mc.MOVZX(resloc, addr8_add(base_loc, ofs_loc, basesize))
+        self.mc.MOVZX8(resloc, AddressLoc(base_loc, ofs_loc, 0, basesize))
 
     def genop_unicodegetitem(self, op, arglocs, resloc):
         base_loc, ofs_loc = arglocs
         basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE,
                                              self.cpu.translate_support_code)
         if itemsize == 4:
-            self.mc.MOV(resloc, addr_add(base_loc, ofs_loc, basesize, 2))
+            self.mc.MOV(resloc, AddressLoc(base_loc, ofs_loc, 2, basesize))
         elif itemsize == 2:
-            self.mc.MOVZX(resloc, addr_add(base_loc, ofs_loc, basesize, 1))
+            self.mc.MOVZX16(resloc, AddressLoc(base_loc, ofs_loc, 1, basesize))
         else:
             assert 0, itemsize
 
@@ -961,7 +963,7 @@
 
     def genop_guard_guard_no_exception(self, ign_1, guard_op, addr,
                                        locs, ign_2):
-        self.mc.CMP(heap(self.cpu.pos_exception()), imm(0))
+        self.mc.CMP_ji(self.cpu.pos_exception(), 0)
         return self.implement_guard(addr, 'NZ')
 
     def genop_guard_guard_exception(self, ign_1, guard_op, addr,
@@ -1421,7 +1423,7 @@
         
         self._emit_call(x, arglocs, 2, tmp=tmp)
 
-        if isinstance(resloc, MODRM64):
+        if isinstance(resloc, StackLoc) and resloc.width == 8:
             self.mc.FSTP(resloc)
         elif size == 1:
             self.mc.AND(eax, imm(0xff))
@@ -1590,6 +1592,14 @@
         num = getattr(rop, opname.upper())
         genop_list[num] = value
 
+def addr_add_const(reg_or_imm1, offset):
+    # XXX: ri386 migration shim
+    return AddressLoc(reg_or_imm1, ImmedLoc(0), 0, offset)
+
+def mem(loc, offset):
+    # XXX: ri386 migration shim
+    return AddressLoc(loc, ImmedLoc(0), 0, offset)
+
 def round_up_to_4(size):
     if size < 4:
         return 4
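
A note on the genop_cmp change above: SETcc only writes the low 8 bits of its
operand, so the result register is now zeroed up front instead of being
zero-extended afterwards with MOVZX. A rough sketch of the sequence the new
code is meant to emit for an integer comparison (illustrative operand names
only, not the real backend API):

    def sketch_int_lt_sequence():
        # e.g. for "i3 = int_lt(i1, i2)" with the result in eax
        return [
            "MOV eax, 0",   # clear the whole result register first
            "CMP i1, i2",   # MOV does not touch the flags, so this is safe
            "SETL al",      # writes only the low 8 bits, hence the MOV above
        ]
    # The previous code emitted "CMP; SETL al; MOVZX eax, al" instead.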

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py	Mon May 24 22:24:33 2010
@@ -95,7 +95,7 @@
 
     def convert_to_imm(self, c):
         adr = self.float_constants.record_float(c.getfloat())
-        return heap64(adr)
+        return AddressLoc(ImmedLoc(adr), ImmedLoc(0), 0, 0)
         
     def after_call(self, v):
         # the result is stored in st0, but we don't have this around,

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py	Mon May 24 22:24:33 2010
@@ -124,6 +124,11 @@
             getattr(self, name + '_' + code1 + code2)(loc1.value, loc2.value)
         return INSN
 
+    def _unaryop(name):
+        def INSN(self, loc):
+            getattr(self, name + '_' + loc.location_code())(loc.value)
+        return INSN
+
     ADD = _binaryop('ADD')
     OR  = _binaryop('OR')
     XOR = _binaryop('XOR')
@@ -131,10 +136,29 @@
     AND = _binaryop('AND')
     SUB = _binaryop('SUB')
     IMUL = _binaryop('IMUL')
+    NEG = _unaryop('NEG')
 
+    CMP = _binaryop('CMP')
     MOV = _binaryop('MOV')
     MOV8 = _binaryop('MOV8')
+    MOVZX8 = _binaryop("MOVZX8")
+    MOVZX16 = _binaryop("MOVZX16")
+
     MOVSD = _binaryop('MOVSD')
+    ADDSD = _binaryop('ADDSD')
+    SUBSD = _binaryop('SUBSD')
+    MULSD = _binaryop('MULSD')
+    DIVSD = _binaryop('DIVSD')
+    UCOMISD = _binaryop('UCOMISD')
+
+
+    def CALL(self, loc):
+        # FIXME: Kludge that works in 32-bit because the "relative" CALL is
+        # actually absolute on i386
+        if loc.location_code() == 'j':
+            self.CALL_l(loc.value)
+        else:
+            getattr(self, 'CALL_' + loc.location_code())(loc.value)
 
     def MOV16(self, dest_loc, src_loc):
         # Select 16-bit operand mode
@@ -149,23 +173,6 @@
         assert isinstance(loc, RegLoc)
         self.POP_r(loc.value)
 
-    def CMP(self, loc0, loc1):
-        if isinstance(loc0, RegLoc):
-            val0 = loc0.value
-            if isinstance(loc1, RegLoc):
-                self.CMP_rr(val0, loc1.value)
-            elif isinstance(loc1, StackLoc):
-                self.CMP_rb(val0, loc1.value)
-            else:
-                self.CMP_ri(val0, loc1.getint())
-        else:
-            assert isinstance(loc0, StackLoc)
-            val0 = loc0.value
-            if isinstance(loc1, RegLoc):
-                self.CMP_br(val0, loc1.value)
-            else:
-                self.CMP_bi(val0, loc1.getint())
-
     def CMPi(self, loc0, loc1):
         # like CMP, but optimized for the case of loc1 being a Const
         assert isinstance(loc1, Const)
@@ -182,6 +189,10 @@
     else:
         return ImmedLoc(x)
 
+def rel32(x):
+    # XXX: ri386 migration shim
+    return AddressLoc(ImmedLoc(x), ImmedLoc(0))
+
 all_extra_instructions = [name for name in LocationCodeBuilder.__dict__
                           if name[0].isupper()]
 all_extra_instructions.sort()
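
For context on the LocationCodeBuilder changes: _unaryop/_binaryop (and now
CALL) build the name of the concrete rx86 method out of the operands'
one-letter location codes ('r' register, 'b' EBP-relative stack slot,
'i' immediate, 'j' address given by an immediate), so MOV(reg, imm) ends up
calling MOV_ri, and so on. A simplified standalone sketch of that dispatch,
not the real classes:

    class FakeLoc(object):
        def __init__(self, code, value):
            self.code = code
            self.value = value
        def location_code(self):
            return self.code

    class FakeBuilder(object):
        # Stand-ins for the generated rx86-level methods.
        def __init__(self):
            self.emitted = []
        def MOV_ri(self, reg, imm):
            self.emitted.append(('MOV_ri', reg, imm))
        def MOV_rr(self, reg1, reg2):
            self.emitted.append(('MOV_rr', reg1, reg2))
        def MOV(self, loc1, loc2):
            # Same idea as _binaryop('MOV'): the location codes of the two
            # operands select the concrete encoding method.
            name = 'MOV_' + loc1.location_code() + loc2.location_code()
            getattr(self, name)(loc1.value, loc2.value)

    mc = FakeBuilder()
    mc.MOV(FakeLoc('r', 0), FakeLoc('i', 42))
    assert mc.emitted == [('MOV_ri', 0, 42)]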

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py	Mon May 24 22:24:33 2010
@@ -310,6 +310,17 @@
 
     return INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br
 
+def select_8_or_32_bit_immed(insn_8, insn_32):
+    def INSN(*args):
+        immed = args[-1]
+        if single_byte(immed):
+            insn_8(*args)
+        else:
+            assert fits_in_32bits(immed)
+            insn_32(*args)
+
+    return INSN
+
 # ____________________________________________________________
 
 
@@ -357,11 +368,15 @@
     # "MOV reg1, [immediate2]" and the opposite direction
     MOV_rj = insn(rex_w, '\x8B', register(1,8), '\x05', immediate(2))
     MOV_jr = insn(rex_w, '\x89', register(2,8), '\x05', immediate(1))
+    MOV_ji = insn(rex_w, '\xC7', '\x05', immediate(1), immediate(2))
 
     MOV8_mr = insn(rex_w, '\x88', register(2, 8), mem_reg_plus_const(1))
 
     MOVZX8_rm = insn(rex_w, '\x0F\xB6', register(1,8), mem_reg_plus_const(2))
+    MOVZX8_ra = insn(rex_w, '\x0F\xB6', register(1,8), mem_reg_plus_scaled_reg_plus_const(2))
+
     MOVZX16_rm = insn(rex_w, '\x0F\xB7', register(1,8), mem_reg_plus_const(2))
+    MOVZX16_ra = insn(rex_w, '\x0F\xB7', register(1,8), mem_reg_plus_scaled_reg_plus_const(2))
 
     # ------------------------------ Arithmetic ------------------------------
 
@@ -372,25 +387,28 @@
     XOR_ri, XOR_rr, XOR_rb, _, _ = common_modes(6)
     CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br = common_modes(7)
 
+    _CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b'))
+    _CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2))
+    CMP_mi = select_8_or_32_bit_immed(_CMP_mi8, _CMP_mi32)
+
+    _CMP_ji8 = insn(rex_w, '\x83', '\x3D', immediate(1), immediate(2, 'b'))
+    _CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2))
+    CMP_ji = select_8_or_32_bit_immed(_CMP_ji8, _CMP_ji32)
+
+    NEG_r = insn(rex_w, '\xF7', register(1), '\xD8')
+
     DIV_r = insn(rex_w, '\xF7', register(1), '\xF0')
     IDIV_r = insn(rex_w, '\xF7', register(1), '\xF8')
 
     IMUL_rr = insn(rex_w, '\x0F\xAF', register(1, 8), register(2), '\xC0')
     IMUL_rb = insn(rex_w, '\x0F\xAF', register(1, 8), stack_bp(2))
-    # 8-bit immediate
+
     _IMUL_rri8 = insn(rex_w, '\x6B', register(1, 8), register(2), '\xC0', immediate(3, 'b'))
-    # 32-bit immediate
     _IMUL_rri32 = insn(rex_w, '\x69', register(1, 8), register(2), '\xC0', immediate(3))
-
-    def IMUL_rri(self, reg1, reg2, immed):
-        if single_byte(immed):
-            self._IMUL_rri8(reg1, reg2, immed)
-        else:
-            assert fits_in_32bits(immed)
-            self._IMUL_rri32(reg1, reg2, immed)
+    IMUL_rri = select_8_or_32_bit_immed(_IMUL_rri8, _IMUL_rri32)
 
     def IMUL_ri(self, reg, immed):
-        return self.IMUL_rri(reg, reg, immed)
+        self.IMUL_rri(reg, reg, immed)
 
     # ------------------------------ Misc stuff ------------------------------
 
@@ -433,6 +451,28 @@
     MOVSD_rj = xmminsn('\xF2', rex_nw, '\x0F\x10', register(1, 8), '\x05', immediate(2))
     MOVSD_jr = xmminsn('\xF2', rex_nw, '\x0F\x11', register(2, 8), '\x05', immediate(1))
 
+    # Arithmetic
+    ADDSD_rr = xmminsn('\xF2', rex_nw, '\x0F\x58', register(1, 8), register(2), '\xC0')
+    ADDSD_rb = xmminsn('\xF2', rex_nw, '\x0F\x58', register(1, 8), stack_bp(2))
+    ADDSD_rj = xmminsn('\xF2', rex_nw, '\x0F\x58', register(1, 8), '\x05', immediate(2))
+
+    SUBSD_rr = xmminsn('\xF2', rex_nw, '\x0F\x5C', register(1, 8), register(2), '\xC0')
+    SUBSD_rb = xmminsn('\xF2', rex_nw, '\x0F\x5C', register(1, 8), stack_bp(2))
+    SUBSD_rj = xmminsn('\xF2', rex_nw, '\x0F\x5C', register(1, 8), '\x05', immediate(2))
+
+    MULSD_rr = xmminsn('\xF2', rex_nw, '\x0F\x59', register(1, 8), register(2), '\xC0')
+    MULSD_rb = xmminsn('\xF2', rex_nw, '\x0F\x59', register(1, 8), stack_bp(2))
+    MULSD_rj = xmminsn('\xF2', rex_nw, '\x0F\x59', register(1, 8), '\x05', immediate(2))
+
+    DIVSD_rr = xmminsn('\xF2', rex_nw, '\x0F\x5E', register(1, 8), register(2), '\xC0')
+    DIVSD_rb = xmminsn('\xF2', rex_nw, '\x0F\x5E', register(1, 8), stack_bp(2))
+    DIVSD_rj = xmminsn('\xF2', rex_nw, '\x0F\x5E', register(1, 8), '\x05', immediate(2))
+
+    # Comparison
+    UCOMISD_rr = xmminsn('\x66', rex_nw, '\x0F\x2E', register(1, 8), register(2), '\xC0')
+    UCOMISD_rb = xmminsn('\x66', rex_nw, '\x0F\x2E', register(1, 8), stack_bp(2))
+    UCOMISD_rj = xmminsn('\x66', rex_nw, '\x0F\x2E', register(1, 8), '\x05', immediate(2))
+
     # ------------------------------------------------------------
 
 Conditions = {
@@ -499,12 +539,27 @@
         py.test.skip("MOV_rj unsupported")
     def MOV_jr(self, mem_immed, reg):
         py.test.skip("MOV_jr unsupported")
+    def MOV_ji(self, mem_immed, immed):
+        py.test.skip("MOV_ji unsupported")
     def XCHG_rj(self, reg, mem_immed):
         py.test.skip("XCGH_rj unsupported")
+    def CMP_ji(self, addr, immed):
+        py.test.skip("CMP_ji unsupported")
     def MOVSD_rj(self, xmm_reg, mem_immed):
         py.test.skip("MOVSD_rj unsupported")
     def MOVSD_jr(self, xmm_reg, mem_immed):
         py.test.skip("MOVSD_jr unsupported")
+    def ADDSD_rj(self, xmm_reg, mem_immed):
+        py.test.skip("ADDSD_rj unsupported")
+    def SUBSD_rj(self, xmm_reg, mem_immed):
+        py.test.skip("SUBSD_rj unsupported")
+    def MULSD_rj(self, xmm_reg, mem_immed):
+        py.test.skip("MULSD_rj unsupported")
+    def DIVSD_rj(self, xmm_reg, mem_immed):
+        py.test.skip("DIVSD_rj unsupported")
+    def UCOMISD_rj(self, xmm_reg, mem_immed):
+        py.test.skip("UCOMISD_rj unsupported")
+
 
 # ____________________________________________________________
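
The new select_8_or_32_bit_immed helper factors out the pattern that was
previously hard-coded in IMUL_rri: use the short sign-extended-byte encoding
when the immediate fits in a signed byte, and fall back to the 32-bit form
otherwise. A standalone sketch of the selection logic (the helpers below are
simplified stand-ins for the real single_byte/fits_in_32bits in rx86.py):

    def single_byte(value):
        return -128 <= value < 128

    def fits_in_32bits(value):
        return -2**31 <= value < 2**31

    def select_8_or_32_bit_immed(insn_8, insn_32):
        def INSN(*args):
            immed = args[-1]
            if single_byte(immed):
                insn_8(*args)
            else:
                assert fits_in_32bits(immed)
                insn_32(*args)
        return INSN

    # Hypothetical use, mirroring CMP_mi above:
    picked = []
    CMP_mi = select_8_or_32_bit_immed(lambda *a: picked.append('8-bit'),
                                      lambda *a: picked.append('32-bit'))
    CMP_mi(('reg', 8), 5)        # small immediate -> 8-bit '\x83' form
    CMP_mi(('reg', 8), 100000)   # large immediate -> 32-bit '\x81' form
    assert picked == ['8-bit', '32-bit']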
 

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/tool/instruction_encoding.sh
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/tool/instruction_encoding.sh	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/tool/instruction_encoding.sh	Mon May 24 22:24:33 2010
@@ -3,16 +3,18 @@
 # Tool to quickly see how the GNU assembler encodes an instruction
 # (AT&T syntax only for now)
 
+# Command line options are passed on to "as"
+
 # Provide readline if available
 if which rlwrap > /dev/null && [ "$INSIDE_RLWRAP" = "" ]; then
     export INSIDE_RLWRAP=1
-    exec rlwrap "$0"
+    exec rlwrap "$0" "$@"
 fi
 
 while :; do
     echo -n '? '
     read instruction
-    echo "$instruction" | as
+    echo "$instruction" | as "$@"
     objdump --disassemble ./a.out | grep '^ *[0-9a-f]\+:'
     rm -f ./a.out
 done
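
With the "$@" pass-through above, any extra command line options given to the
script now go straight to "as"; for example, on a toolchain whose GNU as
accepts it, running the script with --64 should show the 64-bit encodings.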


