[pypy-commit] pypy arm64: (arigo, fijal) call_release_gil first steps

fijal pypy.commits at gmail.com
Tue Jun 25 10:18:20 EDT 2019


Author: Maciej Fijalkowski <fijall at gmail.com>
Branch: arm64
Changeset: r96860:62f3abe7cb9a
Date: 2019-06-25 14:17 +0000
http://bitbucket.org/pypy/pypy/changeset/62f3abe7cb9a/

Log:	(arigo, fijal) call_release_gil first steps

diff --git a/rpython/jit/backend/aarch64/assembler.py b/rpython/jit/backend/aarch64/assembler.py
--- a/rpython/jit/backend/aarch64/assembler.py
+++ b/rpython/jit/backend/aarch64/assembler.py
@@ -914,7 +914,7 @@
             self.mc.LDR_ri(r.ip0.value, r.fp.value, loc.value)
             self.mc.STR_ri(r.ip0.value, r.sp.value, pos)
         elif loc.is_vfp_reg():
-            xxx
+            self.mc.STR_di(loc.value, r.sp.value, pos)
         elif loc.is_imm():
             self.mc.gen_load_int(r.ip0.value, loc.value)
             self.mc.STR_ri(r.ip0.value, r.sp.value, pos)
diff --git a/rpython/jit/backend/aarch64/callbuilder.py b/rpython/jit/backend/aarch64/callbuilder.py
--- a/rpython/jit/backend/aarch64/callbuilder.py
+++ b/rpython/jit/backend/aarch64/callbuilder.py
@@ -3,6 +3,7 @@
 from rpython.jit.backend.aarch64.arch import WORD
 from rpython.jit.metainterp.history import INT, FLOAT, REF
 from rpython.jit.backend.aarch64 import registers as r
+from rpython.jit.backend.arm import conditions as c
 from rpython.jit.backend.aarch64.jump import remap_frame_layout # we use arm algo
 
 from rpython.rlib.objectmodel import we_are_translated
@@ -154,18 +155,25 @@
         #     r6 == fastgil
         #     XXX r7 == previous value of root_stack_top
         self.mc.gen_load_int(r.ip1.value, fastgil)
-        self.mc.LDREX(r.x0.value, r.r6.value)    # load the lock value
-        self.mc.MOV_ri(r.ip.value, 1)
-        self.mc.CMP_ri(r.r3.value, 0)            # is the lock free?
-        self.mc.STREX(r.r3.value, r.ip.value, r.r6.value, c=c.EQ)
+        self.mc.LDAXR(r.x1.value, r.ip1.value)    # load the lock value
+        self.mc.MOVZ_r_u16(r.ip0.value, 1, 0)
+        self.mc.CMP_ri(r.x1.value, 0)            # is the lock free?
+        if self.asm.cpu.gc_ll_descr.gcrootmap:
+            jump_val = XXX
+        else:
+            jump_val = 3 * 4
+        self.mc.B_ofs_cond(jump_val, c.NE)
+        # jump over the next few instructions directly to the call
+        self.mc.STLXR(r.ip0.value, r.ip1.value, r.x1.value)
                                                  # try to claim the lock
-        self.mc.CMP_ri(r.r3.value, 0, cond=c.EQ) # did this succeed?
-        if self.asm.cpu.cpuinfo.arch_version >= 7:
-            self.mc.DMB()
+        self.mc.CMP_wi(r.x1.value, 0) # did this succeed?
+        self.mc.DMB() # <- previous jump here
+        self.mc.B_ofs_cond((8 + 4)* 4, c.EQ) # jump over the call
         # the success of the lock acquisition is defined by
         # 'EQ is true', or equivalently by 'r3 == 0'.
         #
         if self.asm.cpu.gc_ll_descr.gcrootmap:
+            XXX
             # When doing a call_release_gil with shadowstack, there
             # is the risk that the 'rpy_fastgil' was free but the
             # current shadowstack can be the one of a different
@@ -183,18 +191,15 @@
             # by checking again r3.
             self.mc.CMP_ri(r.r3.value, 0)
             self.mc.STR_ri(r.r3.value, r.r6.value, cond=c.EQ)
-        else:
-            b1_location = self.mc.currpos()
-            self.mc.BKPT()                       # BEQ below
         #
         # save the result we just got
-        gpr_to_save, vfp_to_save = self.get_result_locs()
-        with saved_registers(self.mc, gpr_to_save, vfp_to_save):
-            self.mc.BL(self.asm.reacqgil_addr)
-
-        # replace b1_location with B(here, c.EQ)
-        pmc = OverwritingBuilder(self.mc, b1_location, WORD)
-        pmc.B_offs(self.mc.currpos(), c.EQ)
+        self.mc.SUB_ri(r.sp.value, r.sp.value, 2 * WORD)
+        self.mc.STR_di(r.d0.value, r.sp.value, 0)
+        self.mc.STR_ri(r.x0.value, r.sp.value, WORD)
+        self.mc.BL(self.asm.reacqgil_addr)
+        self.mc.LDR_ri(r.d0.value, r.sp.value, 0)
+        self.mc.LDR_ri(r.x0.value, r.sp.value, WORD)
+        self.mc.ADD_ri(r.sp.value, r.sp.value, 2 * WORD)
 
         if not we_are_translated():                    # for testing: now we can access
             self.mc.SUB_ri(r.fp.value, r.fp.value, 1)  # fp again
@@ -204,7 +209,7 @@
             return [], []
         if self.resloc.is_vfp_reg():
             if self.restype == 'L':      # long long
-                return [r.r0, r.r1], []
+                return [r.r0], []
             else:
                 return [], [r.d0]
         assert self.resloc.is_core_reg()
diff --git a/rpython/jit/backend/aarch64/codebuilder.py b/rpython/jit/backend/aarch64/codebuilder.py
--- a/rpython/jit/backend/aarch64/codebuilder.py
+++ b/rpython/jit/backend/aarch64/codebuilder.py
@@ -379,6 +379,11 @@
         assert 0 <= imm <= 4095
         self.write32((base << 22) | (imm << 10) | (rn << 5) | 0b11111)
 
+    def CMP_wi(self, rn, imm):
+        base = 0b0111000100
+        assert 0 <= imm <= 4095
+        self.write32((base << 22) | (imm << 10) | (rn << 5) | 0b11111)
+
     def CSET_r_flag(self, rd, cond):
         base = 0b10011010100
         self.write32((base << 21) | (0b11111 << 16) | (cond << 12) | (1 << 10) |
@@ -389,6 +394,14 @@
         base = 0b11101010000
         self.write32((base << 21) | (rm << 16) | (shift << 10) | (rn << 5) | 0b11111)
 
+    def LDAXR(self, rt, rn):
+        base = 0b1100100001011111111111
+        self.write32((base << 10) | (rn << 5) | rt)
+
+    def STLXR(self, rt, rn, rs):
+        base = 0b11001000000
+        self.write32((base << 21) | (rs << 16) | (0b111111 << 10) | (rn << 5) | rt)
+
     def NOP(self):
         self.write32(0b11010101000000110010000000011111)
 
@@ -405,7 +418,7 @@
     def B_ofs_cond(self, ofs, cond):
         base = 0b01010100
         assert ofs & 0x3 == 0
-        assert -1 << 10 < ofs < 1 << 10
+        assert -1 << 21 < ofs < 1 << 21
         imm = ofs >> 2
         if imm < 0:
             xxx
@@ -434,7 +447,7 @@
         self.write32(0b11010100001 << 21)
 
     def DMB(self):
-        self.write32(0b1101010100000011001111110111111)
+        self.write32(0b11010101000000110011111110111111)
 
     def gen_load_int_full(self, r, value):
         self.MOVZ_r_u16(r, value & 0xFFFF, 0)
diff --git a/rpython/jit/backend/aarch64/opassembler.py b/rpython/jit/backend/aarch64/opassembler.py
--- a/rpython/jit/backend/aarch64/opassembler.py
+++ b/rpython/jit/backend/aarch64/opassembler.py
@@ -165,10 +165,14 @@
         self.emit_int_comp_op(op, arglocs[0], arglocs[1])
         return c.EQ
 
+    emit_comp_op_ptr_eq = emit_comp_op_int_eq
+
     def emit_comp_op_int_ne(self, op, arglocs):
         self.emit_int_comp_op(op, arglocs[0], arglocs[1])
         return c.NE
 
+    emit_comp_op_ptr_ne = emit_comp_op_int_ne
+
     def emit_comp_op_uint_lt(self, op, arglocs):
         self.emit_int_comp_op(op, arglocs[0], arglocs[1])
         return c.LO
diff --git a/rpython/jit/backend/aarch64/regalloc.py b/rpython/jit/backend/aarch64/regalloc.py
--- a/rpython/jit/backend/aarch64/regalloc.py
+++ b/rpython/jit/backend/aarch64/regalloc.py
@@ -417,6 +417,8 @@
     prepare_comp_op_int_gt = prepare_int_cmp
     prepare_comp_op_int_ne = prepare_int_cmp
     prepare_comp_op_int_eq = prepare_int_cmp
+    prepare_comp_op_ptr_eq = prepare_int_cmp
+    prepare_comp_op_ptr_ne = prepare_int_cmp
     prepare_comp_op_uint_lt = prepare_int_cmp
     prepare_comp_op_uint_le = prepare_int_cmp
     prepare_comp_op_uint_ge = prepare_int_cmp


More information about the pypy-commit mailing list