[pypy-commit] pypy arm64: start fighting registers
fijal
pypy.commits at gmail.com
Tue Jun 11 07:26:33 EDT 2019
Author: Maciej Fijalkowski <fijall at gmail.com>
Branch: arm64
Changeset: r96791:7f271fc65bd8
Date: 2019-06-11 11:25 +0000
http://bitbucket.org/pypy/pypy/changeset/7f271fc65bd8/
Log: start fighting registers
diff --git a/rpython/jit/backend/aarch64/assembler.py b/rpython/jit/backend/aarch64/assembler.py
--- a/rpython/jit/backend/aarch64/assembler.py
+++ b/rpython/jit/backend/aarch64/assembler.py
@@ -4,11 +4,9 @@
from rpython.jit.backend.aarch64.locations import imm, StackLocation, get_fp_offset
#from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size
from rpython.jit.backend.aarch64.opassembler import ResOpAssembler
-from rpython.jit.backend.aarch64.regalloc import (Regalloc,
+from rpython.jit.backend.aarch64.regalloc import (Regalloc, check_imm_arg,
operations as regalloc_operations, guard_operations, comp_operations,
CoreRegisterManager)
-# CoreRegisterManager, check_imm_arg, VFPRegisterManager,
-#from rpython.jit.backend.arm import callbuilder
from rpython.jit.backend.aarch64 import registers as r
from rpython.jit.backend.arm import conditions as c
from rpython.jit.backend.llsupport import jitframe
@@ -288,21 +286,19 @@
self._push_all_regs_to_jitframe(mc, [], withfloats)
if exc:
- return # fix later
- XXX
# We might have an exception pending. Load it into r4
# (this is a register saved across calls)
- mc.gen_load_int(r.r5.value, self.cpu.pos_exc_value())
- mc.LDR_ri(r.r4.value, r.r5.value)
+ mc.gen_load_int(r.x5.value, self.cpu.pos_exc_value())
+ mc.LDR_ri(r.x4.value, r.x5.value, 0)
# clear the exc flags
- mc.gen_load_int(r.r6.value, 0)
- mc.STR_ri(r.r6.value, r.r5.value) # pos_exc_value is still in r5
- mc.gen_load_int(r.r5.value, self.cpu.pos_exception())
- mc.STR_ri(r.r6.value, r.r5.value)
+ mc.gen_load_int(r.x6.value, 0)
+        mc.STR_ri(r.x6.value, r.x5.value, 0) # pos_exc_value is still in x5
+ mc.gen_load_int(r.x5.value, self.cpu.pos_exception())
+ mc.STR_ri(r.x6.value, r.x5.value, 0)
# save r4 into 'jf_guard_exc'
offset = self.cpu.get_ofs_of_frame_field('jf_guard_exc')
assert check_imm_arg(abs(offset))
- mc.STR_ri(r.r4.value, r.fp.value, imm=offset)
+ mc.STR_ri(r.x4.value, r.fp.value, offset)
# now we return from the complete frame, which starts from
# _call_header_with_stack_check(). The LEA in _call_footer below
# throws away most of the frame, including all the PUSHes that we
@@ -315,6 +311,12 @@
rawstart = mc.materialize(self.cpu, [])
self.failure_recovery_code[exc + 2 * withfloats] = rawstart
+ def propagate_memoryerror_if_reg_is_null(self, reg_loc):
+ # see ../x86/assembler.py:genop_discard_check_memory_error()
+ self.mc.CMP_ri(reg_loc.value, 0)
+ self.mc.B_ofs_cond(6 * 4, c.NE)
+ self.mc.B(self.propagate_exception_path)
+
def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False):
pass # XXX
@@ -365,7 +367,7 @@
if gcrootmap and gcrootmap.is_shadow_stack:
self._load_shadowstack_top(mc, r.r5, gcrootmap)
# store the new jitframe addr in the shadowstack
- mc.STR_ri(r.r0.value, r.r5.value, imm=-WORD)
+ mc.STR_ri(r.x0.value, r.r5.value, imm=-WORD)
# reset the jf_gcmap field in the jitframe
mc.gen_load_int(r.ip0.value, 0)
@@ -385,13 +387,60 @@
""" Reset the exception. If excvalloc is None, then store it on the
frame in jf_guard_exc
"""
- pass
+ assert excvalloc is not r.ip0
+ assert exctploc is not r.ip0
+ tmpreg = r.lr
+ mc.gen_load_int(r.ip0.value, self.cpu.pos_exc_value())
+ if excvalloc is not None: # store
+ assert excvalloc.is_core_reg()
+ self.load_reg(mc, excvalloc, r.ip0)
+ if on_frame:
+ # store exc_value in JITFRAME
+ ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc')
+ assert check_imm_arg(ofs)
+ #
+ self.load_reg(mc, r.ip0, r.ip0, helper=tmpreg)
+ #
+ self.store_reg(mc, r.ip0, r.fp, ofs, helper=tmpreg)
+ if exctploc is not None:
+ # store pos_exception in exctploc
+ assert exctploc.is_core_reg()
+ mc.gen_load_int(r.ip0.value, self.cpu.pos_exception())
+ self.load_reg(mc, exctploc, r.ip0, helper=tmpreg)
+
+ if on_frame or exctploc is not None:
+ mc.gen_load_int(r.ip0.value, self.cpu.pos_exc_value())
+
+ # reset exception
+ mc.gen_load_int(tmpreg.value, 0)
+
+ self.store_reg(mc, tmpreg, r.ip0, 0)
+
+ mc.gen_load_int(r.ip0.value, self.cpu.pos_exception())
+ self.store_reg(mc, tmpreg, r.ip0, 0)
def _restore_exception(self, mc, excvalloc, exctploc):
pass
def _build_propagate_exception_path(self):
- pass
+ mc = InstrBuilder()
+ self._store_and_reset_exception(mc, r.x0)
+ ofs = self.cpu.get_ofs_of_frame_field('jf_guard_exc')
+ # make sure ofs fits into a register
+ assert check_imm_arg(ofs)
+ self.store_reg(mc, r.x0, r.fp, ofs)
+ propagate_exception_descr = rffi.cast(lltype.Signed,
+ cast_instance_to_gcref(self.cpu.propagate_exception_descr))
+ # put propagate_exception_descr into frame
+ ofs = self.cpu.get_ofs_of_frame_field('jf_descr')
+ # make sure ofs fits into a register
+ assert check_imm_arg(ofs)
+ mc.gen_load_int(r.x0.value, propagate_exception_descr)
+ self.store_reg(mc, r.x0, r.fp, ofs)
+ mc.MOV_rr(r.x0.value, r.fp.value)
+ self.gen_func_epilog(mc)
+ rawstart = mc.materialize(self.cpu, [])
+ self.propagate_exception_path = rawstart
def _build_cond_call_slowpath(self, supports_floats, callee_only):
pass
@@ -464,6 +513,7 @@
self.load_from_gc_table(r.ip0.value, faildescrindex)
self.store_reg(self.mc, r.ip0, r.fp, WORD)
self.push_gcmap(self.mc, gcmap=guardtok.gcmap)
+ assert target
self.mc.BL(target)
return startpos
@@ -776,7 +826,7 @@
mc.RET_r(r.lr.value)
- def store_reg(self, mc, source, base, ofs=0):
+ def store_reg(self, mc, source, base, ofs=0, helper=None):
# uses r.ip1 as a temporary
if source.is_vfp_reg():
return self._store_vfp_reg(mc, source, base, ofs)
@@ -803,6 +853,20 @@
# mc.gen_load_int(r.ip1, ofs)
# mc.STR_rr(source.value, base.value, r.ip1)
+ def load_reg(self, mc, target, base, ofs=0, helper=None):
+ if target.is_vfp_reg():
+ return self._load_vfp_reg(mc, target, base, ofs)
+ elif target.is_core_reg():
+ return self._load_core_reg(mc, target, base, ofs)
+
+ def _load_core_reg(self, mc, target, base, ofs):
+ if check_imm_arg(abs(ofs)):
+ mc.LDR_ri(target.value, base.value, ofs)
+ else:
+ XXX
+ mc.gen_load_int(helper.value, ofs, cond=cond)
+ mc.LDR_rr(target.value, base.value, helper.value, cond=cond)
+
def check_frame_before_jump(self, target_token):
if target_token in self.target_tokens_currently_compiling:
return
diff --git a/rpython/jit/backend/aarch64/codebuilder.py b/rpython/jit/backend/aarch64/codebuilder.py
--- a/rpython/jit/backend/aarch64/codebuilder.py
+++ b/rpython/jit/backend/aarch64/codebuilder.py
@@ -281,7 +281,7 @@
def B(self, target):
target = rffi.cast(lltype.Signed, target)
self.gen_load_int_full(r.ip0.value, target)
- self.BR_r(r.ip0.value)
+ self.BR_r(r.ip0.value)
def BL(self, target):
# XXX use the IMM version if close enough
diff --git a/rpython/jit/backend/aarch64/opassembler.py b/rpython/jit/backend/aarch64/opassembler.py
--- a/rpython/jit/backend/aarch64/opassembler.py
+++ b/rpython/jit/backend/aarch64/opassembler.py
@@ -193,6 +193,9 @@
self.mc.ADD_ri(value_loc.value, value_loc.value, 1)
self.mc.STR_ri(value_loc.value, base_loc.value, 0)
+ def emit_op_check_memory_error(self, op, arglocs):
+ self.propagate_memoryerror_if_reg_is_null(arglocs[0])
+
def _genop_same_as(self, op, arglocs):
argloc, resloc = arglocs
if argloc is not resloc:
@@ -451,7 +454,18 @@
self.mc.LDR_ri(r.ip0.value, arglocs[0].value, offset)
self.mc.gen_load_int_full(r.ip1.value, arglocs[1].value)
self.mc.CMP_rr(r.ip0.value, r.ip1.value)
- self._emit_guard(op, c.EQ, arglocs[2:])
+ self._emit_guard(op, c.EQ, arglocs[2:])
+
+ def emit_op_guard_exception(self, op, arglocs):
+ loc, resloc, pos_exc_value, pos_exception = arglocs[:4]
+ failargs = arglocs[4:]
+ self.mc.gen_load_int(r.ip1.value, pos_exception.value)
+ self.mc.LDR_ri(r.ip0.value, r.ip1.value, 0)
+
+ self.mc.CMP_rr(r.ip0.value, loc.value)
+ self._emit_guard(op, c.EQ, failargs)
+ self._store_and_reset_exception(self.mc, resloc)
+
# ----------------------------- call ------------------------------
diff --git a/rpython/jit/backend/aarch64/regalloc.py b/rpython/jit/backend/aarch64/regalloc.py
--- a/rpython/jit/backend/aarch64/regalloc.py
+++ b/rpython/jit/backend/aarch64/regalloc.py
@@ -695,6 +695,20 @@
prepare_op_guard_gc_type = prepare_op_guard_class
prepare_op_guard_subclass = prepare_op_guard_class
+ def prepare_op_guard_exception(self, op):
+ boxes = op.getarglist()
+ arg0 = ConstInt(rffi.cast(lltype.Signed, op.getarg(0).getint()))
+ loc = self.make_sure_var_in_reg(arg0)
+ if op in self.longevity:
+ resloc = self.force_allocate_reg(op, boxes)
+ self.possibly_free_var(op)
+ else:
+ resloc = None
+ pos_exc_value = imm(self.cpu.pos_exc_value())
+ pos_exception = imm(self.cpu.pos_exception())
+ arglocs = [loc, resloc, pos_exc_value, pos_exception] + self._guard_impl(op)
+ return arglocs
+
prepare_op_ptr_eq = prepare_op_instance_ptr_eq = prepare_op_int_eq
prepare_op_ptr_ne = prepare_op_instance_ptr_ne = prepare_op_int_ne
@@ -730,6 +744,10 @@
res = self.force_allocate_reg(op)
return [arg0, arg1, args[2], args[3], res]
+ def prepare_op_check_memory_error(self, op):
+ argloc = self.make_sure_var_in_reg(op.getarg(0))
+ return [argloc]
+
def prepare_op_jump(self, op):
assert self.jump_target_descr is None
descr = op.getdescr()
diff --git a/rpython/jit/backend/aarch64/registers.py b/rpython/jit/backend/aarch64/registers.py
--- a/rpython/jit/backend/aarch64/registers.py
+++ b/rpython/jit/backend/aarch64/registers.py
@@ -23,4 +23,5 @@
callee_saved_registers = [] # x19, x20, x21, x22]
-argument_regs = caller_resp = [x0, x1, x2, x3, x4, x5, x6, x7]
+argument_regs = [x0, x1, x2, x3, x4, x5, x6, x7]
+caller_resp = argument_regs + [x8, x9, x10, x11, x12, x13, x14, x15]
\ No newline at end of file
More information about the pypy-commit
mailing list