[pypy-commit] pypy guard-compatible: 32-bit support

arigo pypy.commits at gmail.com
Mon Mar 14 08:30:07 EDT 2016


Author: Armin Rigo <arigo at tunes.org>
Branch: guard-compatible
Changeset: r83039:4e72a2558a80
Date: 2016-03-14 13:29 +0100
http://bitbucket.org/pypy/pypy/changeset/4e72a2558a80/

Log:	32-bit support

diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -1732,8 +1732,6 @@
         loc_reg, loc_imm = locs
         assert isinstance(loc_reg, RegLoc)
         assert isinstance(loc_imm, ImmedLoc)
-        if IS_X86_32:
-            XXX
         guard_compat.generate_guard_compatible(self, guard_token,
                                                loc_reg, loc_imm.value)
 
diff --git a/rpython/jit/backend/x86/guard_compat.py b/rpython/jit/backend/x86/guard_compat.py
--- a/rpython/jit/backend/x86/guard_compat.py
+++ b/rpython/jit/backend/x86/guard_compat.py
@@ -1,9 +1,9 @@
 from rpython.rlib import rgc
 from rpython.rlib.objectmodel import we_are_translated
 from rpython.rtyper.lltypesystem import lltype, rffi
-from rpython.jit.backend.x86.arch import WORD
+from rpython.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64
 from rpython.jit.backend.x86 import rx86, codebuf
-from rpython.jit.backend.x86.regloc import X86_64_SCRATCH_REG, imm
+from rpython.jit.backend.x86.regloc import X86_64_SCRATCH_REG, imm, eax, edx
 from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper
 from rpython.jit.metainterp.compile import GuardCompatibleDescr
 from rpython.jit.metainterp.history import BasicFailDescr
@@ -18,9 +18,13 @@
 def generate_guard_compatible(assembler, guard_token, loc_reg, initial_value):
     # fast-path check
     mc = assembler.mc
-    mc.MOV_ri64(X86_64_SCRATCH_REG.value, initial_value)
-    rel_pos_compatible_imm = mc.get_relative_pos()
-    mc.CMP_rr(loc_reg.value, X86_64_SCRATCH_REG.value)
+    if IS_X86_64:
+        mc.MOV_ri64(X86_64_SCRATCH_REG.value, initial_value)
+        rel_pos_compatible_imm = mc.get_relative_pos()
+        mc.CMP_rr(loc_reg.value, X86_64_SCRATCH_REG.value)
+    elif IS_X86_32:
+        mc.CMP_ri32(loc_reg.value, initial_value)
+        rel_pos_compatible_imm = mc.get_relative_pos()
     mc.J_il8(rx86.Conditions['E'], 0)
     je_location = mc.get_relative_pos()
 
@@ -34,9 +38,13 @@
     compatinfo[1] = initial_value
     compatinfo[2] = -1
 
-    mc.MOV_ri64(X86_64_SCRATCH_REG.value, compatinfoaddr)  # patchable
-    guard_token.pos_compatinfo_offset = mc.get_relative_pos() - WORD
-    mc.PUSH_r(X86_64_SCRATCH_REG.value)
+    if IS_X86_64:
+        mc.MOV_ri64(X86_64_SCRATCH_REG.value, compatinfoaddr)  # patchable
+        guard_token.pos_compatinfo_offset = mc.get_relative_pos() - WORD
+        mc.PUSH_r(X86_64_SCRATCH_REG.value)
+    elif IS_X86_32:
+        mc.PUSH_i32(compatinfoaddr)   # patchable
+        guard_token.pos_compatinfo_offset = mc.get_relative_pos() - WORD
     mc.CALL(imm(checker))
     mc.stack_frame_size_delta(-WORD)
 
@@ -113,21 +121,36 @@
 
     mc = codebuf.MachineCodeBlockWrapper()
 
-    mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD)
+    if IS_X86_64:
+        tmp = X86_64_SCRATCH_REG.value
+        stack_ret = 0
+        stack_arg = WORD
+    elif IS_X86_32:
+        if regnum != eax.value:
+            tmp = eax.value
+        else:
+            tmp = edx.value
+        mc.PUSH_r(tmp)
+        stack_ret = WORD
+        stack_arg = 2 * WORD
+
+    mc.MOV_rs(tmp, stack_arg)
 
     pos = mc.get_relative_pos()
-    mc.CMP_mr((X86_64_SCRATCH_REG.value, WORD), regnum)
+    mc.CMP_mr((tmp, WORD), regnum)
     mc.J_il8(rx86.Conditions['E'], 0)    # patched below
     je_location = mc.get_relative_pos()
-    mc.CMP_mi((X86_64_SCRATCH_REG.value, WORD), -1)
-    mc.LEA_rm(X86_64_SCRATCH_REG.value, (X86_64_SCRATCH_REG.value, WORD))
+    mc.CMP_mi((tmp, WORD), -1)
+    mc.LEA_rm(tmp, (tmp, WORD))
     mc.J_il8(rx86.Conditions['NE'], pos - (mc.get_relative_pos() + 2))
 
     # not found!  The condition code is already 'Zero', which we return
     # to mean 'not found'.
+    if IS_X86_32:
+        mc.POP_r(tmp)
     mc.RET16_i(WORD)
 
-    mc.force_frame_size(WORD)
+    mc.force_frame_size(8)   # one word on X86_64, two words on X86_32
 
     # patch the JE above
     offset = mc.get_relative_pos() - je_location
@@ -137,13 +160,15 @@
     # found!  update the assembler by writing the value at 'small_ofs'
     # bytes before our return address.  This should overwrite the const in
     # 'MOV_ri64(r11, const)', first instruction of the guard_compatible.
-    mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD)
-    mc.MOV_rm(X86_64_SCRATCH_REG.value, (X86_64_SCRATCH_REG.value, 0))
-    mc.ADD_rs(X86_64_SCRATCH_REG.value, 0)
-    mc.MOV_mr((X86_64_SCRATCH_REG.value, -WORD), regnum)
+    mc.MOV_rs(tmp, stack_arg)
+    mc.MOV_rm(tmp, (tmp, 0))
+    mc.ADD_rs(tmp, stack_ret)
+    mc.MOV_mr((tmp, -WORD), regnum)
 
     # the condition codes say 'Not Zero', as a result of the ADD above.
     # Return this condition code to mean 'found'.
+    if IS_X86_32:
+        mc.POP_r(tmp)
     mc.RET16_i(WORD)
 
     addr = mc.materialize(assembler.cpu, [])
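
For reference, a minimal Python sketch of what the search stub built above
does at run time (the helper name `patch_guard_immediate` is hypothetical and
only stands in for the MOV_rm/ADD_rs/MOV_mr self-patching sequence; it is not
a function in the backend):

    def search_and_patch(compatinfo, reg_value, patch_guard_immediate):
        # compatinfo layout, as filled in by generate_guard_compatible():
        #   compatinfo[0]  = small_ofs, combined with the stub's return
        #                    address to locate the guard's patchable immediate
        #   compatinfo[1:] = values already known compatible, ending in -1
        i = 1
        while compatinfo[i] != -1:
            if compatinfo[i] == reg_value:
                # found: rewrite the guard's inline immediate to this value,
                # so the next execution matches on the fast-path CMP/JE
                patch_guard_immediate(compatinfo[0], reg_value)
                return True    # the machine code reports this as 'Not Zero'
            i += 1
        return False           # the 'Zero' condition code means 'not found'
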
diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py
--- a/rpython/jit/backend/x86/rx86.py
+++ b/rpython/jit/backend/x86/rx86.py
@@ -404,7 +404,7 @@
     INSN_bi._always_inline_ = True      # try to constant-fold single_byte()
 
     return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj,
-            INSN_ji8, INSN_mi8, INSN_rs)
+            INSN_ji8, INSN_mi8, INSN_rs, INSN_ri32)
 
 def select_8_or_32_bit_immed(insn_8, insn_32):
     def INSN(*args):
@@ -506,13 +506,13 @@
     INC_m = insn(rex_w, '\xFF', orbyte(0), mem_reg_plus_const(1))
     INC_j = insn(rex_w, '\xFF', orbyte(0), abs_(1))
 
-    AD1_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_,ADD_rs = common_modes(0)
-    OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_,_ = common_modes(1)
-    AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_,_ = common_modes(4)
-    SU1_ri,SUB_rr,SUB_rb,_,_,SUB_rm,SUB_rj,SUB_ji8,SUB_mi8,_ = common_modes(5)
-    SBB_ri,SBB_rr,SBB_rb,_,_,SBB_rm,SBB_rj,_,_,_ = common_modes(3)
-    XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,XOR_rj,_,_,_ = common_modes(6)
-    CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_rj,_,_,_ = common_modes(7)
+    AD1_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_,ADD_rs, _ = common_modes(0)
+    OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_,_,_ = common_modes(1)
+    AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_,_,_ = common_modes(4)
+    SU1_ri,SUB_rr,SUB_rb,_,_,SUB_rm,SUB_rj,SUB_ji8,SUB_mi8,_,_ = common_modes(5)
+    SBB_ri,SBB_rr,SBB_rb,_,_,SBB_rm,SBB_rj,_,_,_,_ = common_modes(3)
+    XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,XOR_rj,_,_,_,_ = common_modes(6)
+    CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_rj,_,_,_,CMP_ri32 = common_modes(7)
 
     def ADD_ri(self, reg, immed):
         self.AD1_ri(reg, immed)
@@ -609,6 +609,10 @@
             self.PUS1_i32(immed)
         self.stack_frame_size_delta(+self.WORD)
 
+    def PUSH_i32(self, immed):
+        self.PUS1_i32(immed)
+        self.stack_frame_size_delta(+self.WORD)
+
     PO1_r = insn(rex_nw, register(1), '\x58')
     PO1_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1))
 

