[pypy-commit] pypy arm-backend-2: Cleanup

bivab noreply at buildbot.pypy.org
Thu Dec 29 09:57:22 CET 2011


Author: David Schneider <david.schneider at picle.org>
Branch: arm-backend-2
Changeset: r50940:bd95dd546f05
Date: 2011-12-25 17:39 +0100
http://bitbucket.org/pypy/pypy/changeset/bd95dd546f05/

Log:	Cleanup

diff --git a/pypy/jit/backend/arm/arch.py b/pypy/jit/backend/arm/arch.py
--- a/pypy/jit/backend/arm/arch.py
+++ b/pypy/jit/backend/arm/arch.py
@@ -1,10 +1,9 @@
 from pypy.rpython.lltypesystem import lltype, rffi
 from pypy.rlib.rarithmetic import r_uint
-from pypy.rpython.lltypesystem import lltype
 
 
-FUNC_ALIGN=8
-WORD=4
+FUNC_ALIGN = 8
+WORD = 4
 
 # the number of registers that we need to save around malloc calls
 N_REGISTERS_SAVED_BY_MALLOC = 9
@@ -27,18 +26,22 @@
 }
 """])
 
-arm_int_div_sign = lltype.Ptr(lltype.FuncType([lltype.Signed, lltype.Signed], lltype.Signed))
+
 def arm_int_div_emulator(a, b):
-    return int(a/float(b))
+    return int(a / float(b))
+arm_int_div_sign = lltype.Ptr(
+        lltype.FuncType([lltype.Signed, lltype.Signed], lltype.Signed))
 arm_int_div = rffi.llexternal(
     "pypy__arm_int_div", [lltype.Signed, lltype.Signed], lltype.Signed,
                         _callable=arm_int_div_emulator,
                         compilation_info=eci,
                         _nowrapper=True, elidable_function=True)
 
-arm_uint_div_sign = lltype.Ptr(lltype.FuncType([lltype.Unsigned, lltype.Unsigned], lltype.Unsigned))
+
 def arm_uint_div_emulator(a, b):
-    return r_uint(a)/r_uint(b)
+    return r_uint(a) / r_uint(b)
+arm_uint_div_sign = lltype.Ptr(
+        lltype.FuncType([lltype.Unsigned, lltype.Unsigned], lltype.Unsigned))
 arm_uint_div = rffi.llexternal(
     "pypy__arm_uint_div", [lltype.Unsigned, lltype.Unsigned], lltype.Unsigned,
                         _callable=arm_uint_div_emulator,
@@ -46,7 +49,6 @@
                         _nowrapper=True, elidable_function=True)
 
 
-arm_int_mod_sign = arm_int_div_sign
 def arm_int_mod_emulator(a, b):
     sign = 1
     if a < 0:
@@ -56,9 +58,9 @@
         b = -1 * b
     res = a % b
     return sign * res
+arm_int_mod_sign = arm_int_div_sign
 arm_int_mod = rffi.llexternal(
     "pypy__arm_int_mod", [lltype.Signed, lltype.Signed], lltype.Signed,
                         _callable=arm_int_mod_emulator,
                         compilation_info=eci,
                         _nowrapper=True, elidable_function=True)
-
diff --git a/pypy/jit/backend/arm/assembler.py b/pypy/jit/backend/arm/assembler.py
--- a/pypy/jit/backend/arm/assembler.py
+++ b/pypy/jit/backend/arm/assembler.py
@@ -1,41 +1,37 @@
 from __future__ import with_statement
 import os
-from pypy.jit.backend.arm.helper.assembler import saved_registers, count_reg_args, \
+from pypy.jit.backend.arm.helper.assembler import saved_registers, \
+                                                    count_reg_args, \
                                                     decode32, encode32, \
-                                                    decode64, encode64
+                                                    decode64
 from pypy.jit.backend.arm import conditions as c
-from pypy.jit.backend.arm import locations
 from pypy.jit.backend.arm import registers as r
-from pypy.jit.backend.arm.arch import WORD, FUNC_ALIGN, PC_OFFSET, N_REGISTERS_SAVED_BY_MALLOC
+from pypy.jit.backend.arm.arch import WORD, FUNC_ALIGN, \
+                                    PC_OFFSET, N_REGISTERS_SAVED_BY_MALLOC
 from pypy.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder
-from pypy.jit.backend.arm.regalloc import (Regalloc, ARMFrameManager, ARMv7RegisterMananger,
-                                        check_imm_arg, TempInt,
-                                        TempPtr,
-                                        operations as regalloc_operations,
-                                        operations_with_guard as regalloc_operations_with_guard)
+from pypy.jit.backend.arm.regalloc import (Regalloc, ARMFrameManager,
+                    ARMv7RegisterMananger, check_imm_arg,
+                    operations as regalloc_operations,
+                    operations_with_guard as regalloc_operations_with_guard)
 from pypy.jit.backend.arm.jump import remap_frame_layout
-from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity, TempBox
+from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity
 from pypy.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper
 from pypy.jit.backend.model import CompiledLoopToken
 from pypy.jit.codewriter import longlong
-from pypy.jit.metainterp.history import (Const, ConstInt, ConstPtr,
-                                        BoxInt, BoxPtr, AbstractFailDescr,
-                                        INT, REF, FLOAT)
+from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT)
 from pypy.jit.metainterp.resoperation import rop
 from pypy.rlib import rgc
 from pypy.rlib.objectmodel import we_are_translated
-from pypy.rlib.rarithmetic import r_uint, r_longlong
-from pypy.rlib.longlong2float import float2longlong, longlong2float
 from pypy.rpython.annlowlevel import llhelper
 from pypy.rpython.lltypesystem import lltype, rffi, llmemory
 from pypy.rpython.lltypesystem.lloperation import llop
-from pypy.jit.backend.arm.opassembler import ResOpAssembler, GuardToken
-from pypy.rlib.debug import (debug_print, debug_start, debug_stop,
-                             have_debug_prints)
+from pypy.jit.backend.arm.opassembler import ResOpAssembler
+from pypy.rlib.debug import debug_print, debug_start, debug_stop
 
 # XXX Move to llsupport
 from pypy.jit.backend.x86.support import values_array, memcpy_fn
 
+
 class AssemblerARM(ResOpAssembler):
     """
     Encoding for locations in memory
@@ -52,8 +48,8 @@
     \xFF = END_OF_LOCS
     """
     FLOAT_TYPE = '\xED'
-    REF_TYPE   = '\xEE'
-    INT_TYPE   = '\xEF'
+    REF_TYPE = '\xEE'
+    INT_TYPE = '\xEF'
 
     STACK_LOC = '\xFC'
     IMM_LOC = '\xFD'
@@ -62,11 +58,11 @@
 
     END_OF_LOCS = '\xFF'
 
-
     def __init__(self, cpu, failargs_limit=1000):
         self.cpu = cpu
         self.fail_boxes_int = values_array(lltype.Signed, failargs_limit)
-        self.fail_boxes_float = values_array(longlong.FLOATSTORAGE, failargs_limit)
+        self.fail_boxes_float = values_array(longlong.FLOATSTORAGE,
+                                                            failargs_limit)
         self.fail_boxes_ptr = values_array(llmemory.GCREF, failargs_limit)
         self.fail_boxes_count = 0
         self.fail_force_index = 0
@@ -87,7 +83,7 @@
 
     def setup(self, looptoken, operations):
         self.current_clt = looptoken.compiled_loop_token
-        operations = self.cpu.gc_ll_descr.rewrite_assembler(self.cpu, 
+        operations = self.cpu.gc_ll_descr.rewrite_assembler(self.cpu,
                         operations, self.current_clt.allgcrefs)
         assert self.memcpy_addr != 0, 'setup_once() not called?'
         self.mc = ARMv7Builder()
@@ -130,7 +126,8 @@
             self._build_release_gil(gc_ll_descr.gcrootmap)
         self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn)
         self._exit_code_addr = self._gen_exit_path()
-        self._leave_jitted_hook_save_exc = self._gen_leave_jitted_hook_code(True)
+        self._leave_jitted_hook_save_exc = \
+                                    self._gen_leave_jitted_hook_code(True)
         self._leave_jitted_hook = self._gen_leave_jitted_hook_code(False)
 
     @staticmethod
@@ -146,13 +143,14 @@
             after()
 
     _NOARG_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void))
+
     def _build_release_gil(self, gcrootmap):
         assert gcrootmap.is_shadow_stack
         releasegil_func = llhelper(self._NOARG_FUNC,
                                    self._release_gil_shadowstack)
         reacqgil_func = llhelper(self._NOARG_FUNC,
                                  self._reacquire_gil_shadowstack)
-        self.releasegil_addr  = rffi.cast(lltype.Signed, releasegil_func)
+        self.releasegil_addr = rffi.cast(lltype.Signed, releasegil_func)
         self.reacqgil_addr = rffi.cast(lltype.Signed, reacqgil_func)
 
     def _gen_leave_jitted_hook_code(self, save_exc):
@@ -186,34 +184,37 @@
         @rgc.no_collect
         def failure_recovery_func(mem_loc, frame_pointer, stack_pointer):
             """mem_loc is a structure in memory describing where the values for
-            the failargs are stored.
-            frame loc is the address of the frame pointer for the frame to be
-            decoded frame """
-            return self.decode_registers_and_descr(mem_loc, frame_pointer, stack_pointer)
+            the failargs are stored.  frame loc is the address of the frame
+            pointer for the frame to be decoded frame """
+            return self.decode_registers_and_descr(mem_loc,
+                                            frame_pointer, stack_pointer)
 
         self.failure_recovery_func = failure_recovery_func
 
-    recovery_func_sign = lltype.Ptr(lltype.FuncType([lltype.Signed, lltype.Signed, lltype.Signed], lltype.Signed))
+    recovery_func_sign = lltype.Ptr(lltype.FuncType([lltype.Signed,
+                                lltype.Signed, lltype.Signed], lltype.Signed))
 
     @rgc.no_collect
     def decode_registers_and_descr(self, mem_loc, frame_loc, regs_loc):
-        """Decode locations encoded in memory at mem_loc and write the values to
-        the failboxes.
-        Values for spilled vars and registers are stored on stack at frame_loc
-        """
-        #XXX check if units are correct here, when comparing words and bytes and stuff
-        # assert 0, 'check if units are correct here, when comparing words and bytes and stuff'
+        """Decode locations encoded in memory at mem_loc and write the values
+        to the failboxes.  Values for spilled vars and registers are stored on
+        stack at frame_loc """
+        # XXX check if units are correct here, when comparing words and bytes
+        # and stuff assert 0, 'check if units are correct here, when comparing
+        # words and bytes and stuff'
 
         enc = rffi.cast(rffi.CCHARP, mem_loc)
-        frame_depth = frame_loc - (regs_loc + len(r.all_regs)*WORD + len(r.all_vfp_regs)*2*WORD)
+        frame_depth = frame_loc - (regs_loc + len(r.all_regs)
+                            * WORD + len(r.all_vfp_regs) * 2 * WORD)
         assert (frame_loc - frame_depth) % 4 == 0
         stack = rffi.cast(rffi.CCHARP, frame_loc - frame_depth)
         assert regs_loc % 4 == 0
         vfp_regs = rffi.cast(rffi.CCHARP, regs_loc)
-        assert (regs_loc + len(r.all_vfp_regs)*2*WORD) % 4 == 0
+        assert (regs_loc + len(r.all_vfp_regs) * 2 * WORD) % 4 == 0
         assert frame_depth >= 0
 
-        regs = rffi.cast(rffi.CCHARP, regs_loc + len(r.all_vfp_regs)*2*WORD)
+        regs = rffi.cast(rffi.CCHARP,
+                    regs_loc + len(r.all_vfp_regs) * 2 * WORD)
         i = -1
         fail_index = -1
         while(True):
@@ -231,33 +232,35 @@
             if res == self.IMM_LOC:
                 # imm value
                 if group == self.INT_TYPE or group == self.REF_TYPE:
-                    value = decode32(enc, i+1)
+                    value = decode32(enc, i + 1)
                     i += 4
                 else:
                     assert group == self.FLOAT_TYPE
-                    adr = decode32(enc, i+1)
-                    value = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0]
+                    adr = decode32(enc, i + 1)
+                    tp = rffi.CArrayPtr(longlong.FLOATSTORAGE)
+                    value = rffi.cast(tp, adr)[0]
                     self.fail_boxes_float.setitem(fail_index, value)
                     i += 4
                     continue
             elif res == self.STACK_LOC:
-                stack_loc = decode32(enc, i+1)
+                stack_loc = decode32(enc, i + 1)
                 i += 4
                 if group == self.FLOAT_TYPE:
-                    value = decode64(stack, frame_depth - (stack_loc+1)*WORD)
+                    value = decode64(stack,
+                            frame_depth - (stack_loc + 1) * WORD)
                     fvalue = rffi.cast(longlong.FLOATSTORAGE, value)
                     self.fail_boxes_float.setitem(fail_index, fvalue)
                     continue
                 else:
-                    value = decode32(stack, frame_depth - stack_loc*WORD)
-            else: # REG_LOC
+                    value = decode32(stack, frame_depth - stack_loc * WORD)
+            else:  # REG_LOC
                 reg = ord(enc[i])
                 if group == self.FLOAT_TYPE:
-                    value = decode64(vfp_regs, reg*2*WORD)
+                    value = decode64(vfp_regs, reg * 2 * WORD)
                     self.fail_boxes_float.setitem(fail_index, value)
                     continue
                 else:
-                    value = decode32(regs, reg*WORD)
+                    value = decode32(regs, reg * WORD)
 
             if group == self.INT_TYPE:
                 self.fail_boxes_int.setitem(fail_index, value)
@@ -268,9 +271,8 @@
             else:
                 assert 0, 'unknown type'
 
-
         assert enc[i] == self.END_OF_LOCS
-        descr = decode32(enc, i+1)
+        descr = decode32(enc, i + 1)
         self.fail_boxes_count = fail_index
         self.fail_force_index = frame_loc
         return descr
@@ -284,7 +286,8 @@
                 j += 1
                 continue
 
-            assert res in [self.FLOAT_TYPE, self.INT_TYPE, self.REF_TYPE], 'location type is not supported'
+            assert res in [self.FLOAT_TYPE, self.INT_TYPE, self.REF_TYPE], \
+                        'location type is not supported'
             res_type = res
             j += 1
             res = enc[j]
@@ -298,10 +301,10 @@
                     t = INT
                 else:
                     t = REF
-                stack_loc = decode32(enc, j+1)
+                stack_loc = decode32(enc, j + 1)
                 loc = regalloc.frame_manager.frame_pos(stack_loc, t)
                 j += 4
-            else: # REG_LOC
+            else:  # REG_LOC
                 if res_type == self.FLOAT_TYPE:
                     loc = r.all_vfp_regs[ord(res)]
                 else:
@@ -311,7 +314,6 @@
         return locs
 
     def _build_malloc_slowpath(self):
-        gcrootmap = self.cpu.gc_ll_descr.gcrootmap
         mc = ARMv7Builder()
         assert self.cpu.supports_floats
         # We need to push two registers here because we are going to make a
@@ -351,13 +353,16 @@
 
     def _gen_exit_path(self):
         mc = ARMv7Builder()
-        decode_registers_addr = llhelper(self.recovery_func_sign, self.failure_recovery_func)
-        
+        decode_registers_addr = llhelper(self.recovery_func_sign,
+                                            self.failure_recovery_func)
         self._insert_checks(mc)
         with saved_registers(mc, r.all_regs, r.all_vfp_regs):
-            mc.MOV_rr(r.r0.value, r.ip.value) # move mem block address, to r0 to pass as
-            mc.MOV_rr(r.r1.value, r.fp.value) # pass the current frame pointer as second param
-            mc.MOV_rr(r.r2.value, r.sp.value) # pass the current stack pointer as third param
+            # move mem block address, to r0 to pass as
+            mc.MOV_rr(r.r0.value, r.ip.value)
+            # pass the current frame pointer as second param
+            mc.MOV_rr(r.r1.value, r.fp.value)
+            # pass the current stack pointer as third param
+            mc.MOV_rr(r.r2.value, r.sp.value)
             self._insert_checks(mc)
             mc.BL(rffi.cast(lltype.Signed, decode_registers_addr))
             mc.MOV_rr(r.ip.value, r.r0.value)
@@ -376,15 +381,15 @@
         # 1 separator byte
         # 4 bytes for the faildescr
         # const floats are stored in memory and the box contains the address
-        memsize = (len(arglocs)-1)*6+5
+        memsize = (len(arglocs) - 1) * 6 + 5
         memaddr = self.datablockwrapper.malloc_aligned(memsize, alignment=1)
         mem = rffi.cast(rffi.CArrayPtr(lltype.Char), memaddr)
         i = 0
         j = 0
         while i < len(args):
-            if arglocs[i+1]:
+            if arglocs[i + 1]:
                 arg = args[i]
-                loc = arglocs[i+1]
+                loc = arglocs[i + 1]
                 if arg.type == INT:
                     mem[j] = self.INT_TYPE
                     j += 1
@@ -404,7 +409,7 @@
                     assert (arg.type == INT or arg.type == REF
                                 or arg.type == FLOAT)
                     mem[j] = self.IMM_LOC
-                    encode32(mem, j+1, loc.getint())
+                    encode32(mem, j + 1, loc.getint())
                     j += 5
                 else:
                     assert loc.is_stack()
@@ -413,9 +418,9 @@
                         # Float locs store the location number with an offset
                         # of 1 -.- so we need to take this into account here
                         # when generating the encoding
-                        encode32(mem, j+1, loc.position-1)
+                        encode32(mem, j + 1, loc.position - 1)
                     else:
-                        encode32(mem, j+1, loc.position)
+                        encode32(mem, j + 1, loc.position)
                     j += 5
             else:
                 mem[j] = self.EMPTY_LOC
@@ -425,10 +430,11 @@
         mem[j] = chr(0xFF)
 
         n = self.cpu.get_fail_descr_number(descr)
-        encode32(mem, j+1, n)
+        encode32(mem, j + 1, n)
         return memaddr
 
-    def _gen_path_to_exit_path(self, descr, args, arglocs, save_exc, fcond=c.AL):
+    def _gen_path_to_exit_path(self, descr, args, arglocs,
+                                            save_exc, fcond=c.AL):
         assert isinstance(save_exc, bool)
         memaddr = self.gen_descr_encoding(descr, args, arglocs)
         self.gen_exit_code(self.mc, memaddr, save_exc, fcond)
@@ -457,11 +463,13 @@
             self.gen_footer_shadowstack(gcrootmap, mc)
         offset = 1
         if self.cpu.supports_floats:
-            offset += 1 # to keep stack alignment
+            offset += 1  # to keep stack alignment
         mc.MOV_rr(r.sp.value, r.fp.value, cond=cond)
-        mc.ADD_ri(r.sp.value, r.sp.value, (N_REGISTERS_SAVED_BY_MALLOC+offset)*WORD, cond=cond)
+        mc.ADD_ri(r.sp.value, r.sp.value,
+                    (N_REGISTERS_SAVED_BY_MALLOC + offset) * WORD, cond=cond)
         if self.cpu.supports_floats:
-            mc.VPOP([reg.value for reg in r.callee_saved_vfp_registers], cond=cond)
+            mc.VPOP([reg.value for reg in r.callee_saved_vfp_registers],
+                                                                    cond=cond)
         mc.POP([reg.value for reg in r.callee_restored_registers], cond=cond)
 
     def gen_func_prolog(self):
@@ -469,11 +477,12 @@
         offset = 1
         if self.cpu.supports_floats:
             self.mc.VPUSH([reg.value for reg in r.callee_saved_vfp_registers])
-            offset +=1 # to keep stack alignment
+            offset += 1  # to keep stack alignment
         # here we modify the stack pointer to leave room for the 9 registers
         # that are going to be saved here around malloc calls and one word to
         # store the force index
-        self.mc.SUB_ri(r.sp.value, r.sp.value, (N_REGISTERS_SAVED_BY_MALLOC+offset)*WORD)
+        self.mc.SUB_ri(r.sp.value, r.sp.value,
+                    (N_REGISTERS_SAVED_BY_MALLOC + offset) * WORD)
         self.mc.MOV_rr(r.fp.value, r.sp.value)
         gcrootmap = self.cpu.gc_ll_descr.gcrootmap
         if gcrootmap and gcrootmap.is_shadow_stack:
@@ -485,18 +494,19 @@
         # XXX add some comments
         rst = gcrootmap.get_root_stack_top_addr()
         self.mc.gen_load_int(r.ip.value, rst)
-        self.mc.LDR_ri(r.r4.value, r.ip.value) # LDR r4, [rootstacktop]
-        self.mc.ADD_ri(r.r5.value, r.r4.value, imm=2*WORD) # ADD r5, r4 [2*WORD]
+        self.mc.LDR_ri(r.r4.value, r.ip.value)  # LDR r4, [rootstacktop]
+        self.mc.ADD_ri(r.r5.value, r.r4.value,
+                                    imm=2 * WORD)  # ADD r5, r4 [2*WORD]
         self.mc.gen_load_int(r.r6.value, gcrootmap.MARKER)
         self.mc.STR_ri(r.r6.value, r.r4.value)
-        self.mc.STR_ri(r.fp.value, r.r4.value, WORD) 
+        self.mc.STR_ri(r.fp.value, r.r4.value, WORD)
         self.mc.STR_ri(r.r5.value, r.ip.value)
 
     def gen_footer_shadowstack(self, gcrootmap, mc):
         rst = gcrootmap.get_root_stack_top_addr()
         mc.gen_load_int(r.ip.value, rst)
-        mc.LDR_ri(r.r4.value, r.ip.value) # LDR r4, [rootstacktop]
-        mc.SUB_ri(r.r5.value, r.r4.value, imm=2*WORD) # ADD r5, r4 [2*WORD]
+        mc.LDR_ri(r.r4.value, r.ip.value)  # LDR r4, [rootstacktop]
+        mc.SUB_ri(r.r5.value, r.r4.value, imm=2 * WORD)  # ADD r5, r4 [2*WORD]
         mc.STR_ri(r.r5.value, r.ip.value)
 
     def gen_bootstrap_code(self, nonfloatlocs, floatlocs, inputargs):
@@ -540,8 +550,6 @@
 
         reg_args = count_reg_args(inputargs)
 
-        stack_locs = len(inputargs) - reg_args
-
         selected_reg = 0
         count = 0
         float_args = []
@@ -569,16 +577,16 @@
 
         # move float arguments to vfp regsiters
         for loc, vfp_reg in float_args:
-            self.mov_to_vfp_loc(loc, r.all_regs[loc.value+1], vfp_reg)
+            self.mov_to_vfp_loc(loc, r.all_regs[loc.value + 1], vfp_reg)
 
         # remap values stored in core registers
         remap_frame_layout(self, nonfloat_args, nonfloat_regs, r.ip)
 
         # load values passed on the stack to the corresponding locations
-        stack_position = len(r.callee_saved_registers)*WORD + \
-                            len(r.callee_saved_vfp_registers)*2*WORD + \
-                            N_REGISTERS_SAVED_BY_MALLOC * WORD + \
-                            2 * WORD # for the FAIL INDEX and the stack padding
+        stack_position = len(r.callee_saved_registers) * WORD + \
+                        len(r.callee_saved_vfp_registers) * 2 * WORD + \
+                        N_REGISTERS_SAVED_BY_MALLOC * WORD + \
+                        2 * WORD  # for the FAIL INDEX and the stack padding
         count = 0
         for i in range(reg_args, len(inputargs)):
             arg = inputargs[i]
@@ -625,6 +633,7 @@
         for op in ops:
             debug_print(op.repr())
         debug_stop('jit-backend-ops')
+
     # cpu interface
     def assemble_loop(self, inputargs, operations, looptoken, log):
 
@@ -635,13 +644,14 @@
         operations = self.setup(looptoken, operations)
         self._dump(operations)
         longevity = compute_vars_longevity(inputargs, operations)
-        regalloc = Regalloc(longevity, assembler=self, frame_manager=ARMFrameManager())
-
+        regalloc = Regalloc(longevity, assembler=self,
+                                frame_manager=ARMFrameManager())
 
         self.align()
         self.gen_func_prolog()
         sp_patch_location = self._prepare_sp_patch_position()
-        nonfloatlocs, floatlocs = regalloc.prepare_loop(inputargs, operations, looptoken)
+        nonfloatlocs, floatlocs = regalloc.prepare_loop(inputargs,
+                                                operations, looptoken)
         self.gen_bootstrap_code(nonfloatlocs, floatlocs, inputargs)
         looptoken._arm_arglocs = [nonfloatlocs, floatlocs]
         loop_head = self.mc.currpos()
@@ -662,11 +672,13 @@
         self.write_pending_failure_recoveries()
         loop_start = self.materialize_loop(looptoken)
         looptoken._arm_bootstrap_code = loop_start
-        looptoken._arm_direct_bootstrap_code = loop_start + direct_bootstrap_code
+        direct_code_start = loop_start + direct_bootstrap_code
+        looptoken._arm_direct_bootstrap_code = direct_code_start
         self.process_pending_guards(loop_start)
         if log and not we_are_translated():
             print 'Loop', inputargs, operations
-            self.mc._dump_trace(loop_start, 'loop_%s.asm' % self.cpu.total_compiled_loops)
+            self.mc._dump_trace(loop_start,
+                    'loop_%s.asm' % self.cpu.total_compiled_loops)
             print 'Done assembling loop with token %r' % looptoken
         self.teardown()
 
@@ -689,14 +701,15 @@
 
         self._walk_operations(operations, regalloc)
 
-        #original_loop_token._arm_frame_depth = regalloc.frame_manager.frame_depth
-        self._patch_sp_offset(sp_patch_location, regalloc.frame_manager.frame_depth)
+        self._patch_sp_offset(sp_patch_location,
+                                regalloc.frame_manager.frame_depth)
 
         self.write_pending_failure_recoveries()
         bridge_start = self.materialize_loop(original_loop_token)
         self.process_pending_guards(bridge_start)
 
-        self.patch_trace(faildescr, original_loop_token, bridge_start, regalloc)
+        self.patch_trace(faildescr, original_loop_token,
+                                    bridge_start, regalloc)
         if log and not we_are_translated():
             print 'Bridge', inputargs, operations
             self.mc._dump_trace(bridge_start, 'bridge_%d.asm' %
@@ -715,10 +728,10 @@
             descr = tok.descr
             #generate the exit stub and the encoded representation
             pos = self.mc.currpos()
-            tok.pos_recovery_stub = pos 
+            tok.pos_recovery_stub = pos
 
             memaddr = self._gen_path_to_exit_path(descr, tok.failargs,
-                                            tok.faillocs, save_exc=tok.save_exc)
+                                        tok.faillocs, save_exc=tok.save_exc)
             # store info on the descr
             descr._arm_frame_depth = tok.faillocs[0].getint()
             descr._failure_recovery_code = memaddr
@@ -735,13 +748,15 @@
 
             if not tok.is_invalidate:
                 #patch the guard jumpt to the stub
-                # overwrite the generate NOP with a B_offs to the pos of the stub
+                # overwrite the generate NOP with a B_offs to the pos of the
+                # stub
                 mc = ARMv7Builder()
-                mc.B_offs(descr._arm_guard_pos - tok.offset, c.get_opposite_of(tok.fcond))
+                mc.B_offs(descr._arm_guard_pos - tok.offset,
+                                    c.get_opposite_of(tok.fcond))
                 mc.copy_to_raw_memory(block_start + tok.offset)
             else:
                 clt.invalidate_positions.append(
-                    (block_start + tok.offset, descr._arm_guard_pos - tok.offset))
+                (block_start + tok.offset, descr._arm_guard_pos - tok.offset))
 
     def get_asmmemmgr_blocks(self, looptoken):
         clt = looptoken.compiled_loop_token
@@ -750,21 +765,22 @@
         return clt.asmmemmgr_blocks
 
     def _prepare_sp_patch_position(self):
-        """Generate NOPs as placeholder to patch the instruction(s) to update the
-        sp according to the number of spilled variables"""
-        size = (self.mc.size_of_gen_load_int+WORD)
+        """Generate NOPs as placeholder to patch the instruction(s) to update
+        the sp according to the number of spilled variables"""
+        size = (self.mc.size_of_gen_load_int + WORD)
         l = self.mc.currpos()
-        for _ in range(size//WORD):
+        for _ in range(size // WORD):
             self.mc.NOP()
         return l
 
     def _patch_sp_offset(self, pos, frame_depth):
-        cb = OverwritingBuilder(self.mc, pos, OverwritingBuilder.size_of_gen_load_int+WORD)
+        cb = OverwritingBuilder(self.mc, pos,
+                                OverwritingBuilder.size_of_gen_load_int + WORD)
         # Note: the frame_depth is one less than the value stored in the frame
         # manager
         if frame_depth == 1:
             return
-        n = (frame_depth-1)*WORD
+        n = (frame_depth - 1) * WORD
 
         # ensure the sp is 8 byte aligned when patching it
         if n % 8 != 0:
@@ -794,7 +810,7 @@
                 cb.SUB_rr(r.sp.value, base_reg.value, r.ip.value, cond=fcond)
 
     def _walk_operations(self, operations, regalloc):
-        fcond=c.AL
+        fcond = c.AL
         self._regalloc = regalloc
         while regalloc.position() < len(operations) - 1:
             regalloc.next_instruction()
@@ -804,7 +820,7 @@
             if op.has_no_side_effect() and op.result not in regalloc.longevity:
                 regalloc.possibly_free_vars_for_op(op)
             elif self.can_merge_with_next_guard(op, i, operations):
-                guard = operations[i+1]
+                guard = operations[i + 1]
                 assert guard.is_guard()
                 arglocs = regalloc_operations_with_guard[opnum](regalloc, op,
                                         guard, fcond)
@@ -818,7 +834,8 @@
             else:
                 arglocs = regalloc_operations[opnum](regalloc, op, fcond)
                 if arglocs is not None:
-                    fcond = asm_operations[opnum](self, op, arglocs, regalloc, fcond)
+                    fcond = asm_operations[opnum](self, op, arglocs,
+                                                        regalloc, fcond)
             if op.is_guard():
                 regalloc.possibly_free_vars(op.getfailargs())
             if op.result:
@@ -864,7 +881,7 @@
         if size == 4:
             return
         if size == 1:
-            if not signed: #unsigned char
+            if not signed:  # unsigned char
                 self.mc.AND_ri(resloc.value, resloc.value, 0xFF)
             else:
                 self.mc.LSL_ri(resloc.value, resloc.value, 24)
@@ -873,9 +890,6 @@
             if not signed:
                 self.mc.LSL_ri(resloc.value, resloc.value, 16)
                 self.mc.LSR_ri(resloc.value, resloc.value, 16)
-                #self.mc.MOV_ri(r.ip.value, 0xFF)
-                #self.mc.ORR_ri(r.ip.value, 0xCFF)
-                #self.mc.AND_rr(resloc.value, resloc.value, r.ip.value)
             else:
                 self.mc.LSL_ri(resloc.value, resloc.value, 16)
                 self.mc.ASR_ri(resloc.value, resloc.value, 16)
@@ -924,24 +938,28 @@
                 temp = r.lr
             else:
                 temp = r.ip
-            offset = loc.position*WORD
+            offset = loc.position * WORD
             if not check_imm_arg(offset, size=0xFFF):
                 self.mc.PUSH([temp.value], cond=cond)
                 self.mc.gen_load_int(temp.value, -offset, cond=cond)
-                self.mc.STR_rr(prev_loc.value, r.fp.value, temp.value, cond=cond)
+                self.mc.STR_rr(prev_loc.value, r.fp.value,
+                                            temp.value, cond=cond)
                 self.mc.POP([temp.value], cond=cond)
             else:
-                self.mc.STR_ri(prev_loc.value, r.fp.value, imm=-offset, cond=cond)
+                self.mc.STR_ri(prev_loc.value, r.fp.value,
+                                            imm=-offset, cond=cond)
         else:
             assert 0, 'unsupported case'
 
     def _mov_stack_to_loc(self, prev_loc, loc, cond=c.AL):
         pushed = False
         if loc.is_reg():
-            assert prev_loc.type != FLOAT, 'trying to load from an incompatible location into a core register'
-            assert loc is not r.lr, 'lr is not supported as a target when moving from the stack'
+            assert prev_loc.type != FLOAT, 'trying to load from an \
+                incompatible location into a core register'
+            assert loc is not r.lr, 'lr is not supported as a target \
+                when moving from the stack'
             # unspill a core register
-            offset = prev_loc.position*WORD
+            offset = prev_loc.position * WORD
             if not check_imm_arg(offset, size=0xFFF):
                 self.mc.PUSH([r.lr.value], cond=cond)
                 pushed = True
@@ -952,9 +970,10 @@
             if pushed:
                 self.mc.POP([r.lr.value], cond=cond)
         elif loc.is_vfp_reg():
-            assert prev_loc.type == FLOAT, 'trying to load from an incompatible location into a float register'
+            assert prev_loc.type == FLOAT, 'trying to load from an \
+                incompatible location into a float register'
             # load spilled value into vfp reg
-            offset = prev_loc.position*WORD
+            offset = prev_loc.position * WORD
             self.mc.PUSH([r.ip.value], cond=cond)
             pushed = True
             if not check_imm_arg(offset):
@@ -980,10 +999,11 @@
         if loc.is_vfp_reg():
             self.mc.VMOV_cc(loc.value, prev_loc.value, cond=cond)
         elif loc.is_stack():
-            assert loc.type == FLOAT, 'trying to store to an incompatible location from a float register'
+            assert loc.type == FLOAT, 'trying to store to an \
+                incompatible location from a float register'
             # spill vfp register
             self.mc.PUSH([r.ip.value], cond=cond)
-            offset = loc.position*WORD
+            offset = loc.position * WORD
             if not check_imm_arg(offset):
                 self.mc.gen_load_int(r.ip.value, offset, cond=cond)
                 self.mc.SUB_rr(r.ip.value, r.fp.value, r.ip.value, cond=cond)
@@ -1026,7 +1046,7 @@
             self.mc.POP([r.ip.value], cond=cond)
         elif vfp_loc.is_stack() and vfp_loc.type == FLOAT:
             # load spilled vfp value into two core registers
-            offset = vfp_loc.position*WORD
+            offset = vfp_loc.position * WORD
             if not check_imm_arg(offset, size=0xFFF):
                 self.mc.PUSH([r.ip.value], cond=cond)
                 self.mc.gen_load_int(r.ip.value, -offset, cond=cond)
@@ -1036,7 +1056,8 @@
                 self.mc.POP([r.ip.value], cond=cond)
             else:
                 self.mc.LDR_ri(reg1.value, r.fp.value, imm=-offset, cond=cond)
-                self.mc.LDR_ri(reg2.value, r.fp.value, imm=-offset+WORD, cond=cond)
+                self.mc.LDR_ri(reg2.value, r.fp.value,
+                                                imm=-offset + WORD, cond=cond)
         else:
             assert 0, 'unsupported case'
 
@@ -1048,7 +1069,7 @@
             self.mc.VMOV_cr(vfp_loc.value, reg1.value, reg2.value, cond=cond)
         elif vfp_loc.is_stack():
             # move from two core registers to a float stack location
-            offset = vfp_loc.position*WORD
+            offset = vfp_loc.position * WORD
             if not check_imm_arg(offset, size=0xFFF):
                 self.mc.PUSH([r.ip.value], cond=cond)
                 self.mc.gen_load_int(r.ip.value, -offset, cond=cond)
@@ -1058,7 +1079,8 @@
                 self.mc.POP([r.ip.value], cond=cond)
             else:
                 self.mc.STR_ri(reg1.value, r.fp.value, imm=-offset, cond=cond)
-                self.mc.STR_ri(reg2.value, r.fp.value, imm=-offset+WORD, cond=cond)
+                self.mc.STR_ri(reg2.value, r.fp.value,
+                                                imm=-offset + WORD, cond=cond)
         else:
             assert 0, 'unsupported case'
 
@@ -1111,7 +1133,7 @@
 
     def malloc_cond(self, nursery_free_adr, nursery_top_adr, size, tid):
         size = max(size, self.cpu.gc_ll_descr.minimal_size_in_nursery)
-        size = (size + WORD-1) & ~(WORD-1)     # round up
+        size = (size + WORD - 1) & ~(WORD - 1)     # round up
 
         self.mc.gen_load_int(r.r0.value, nursery_free_adr)
         self.mc.LDR_ri(r.r0.value, r.r0.value)
@@ -1156,7 +1178,6 @@
         self.mc.gen_load_int(r.ip.value, tid)
         self.mc.STR_ri(r.ip.value, r.r0.value)
 
-
     def mark_gc_roots(self, force_index, use_copy_area=False):
         if force_index < 0:
             return     # not needed
@@ -1180,14 +1201,18 @@
         else:
             return 0
 
+
 def not_implemented(msg):
     os.write(2, '[ARM/asm] %s\n' % msg)
     raise NotImplementedError(msg)
 
+
 def notimplemented_op(self, op, arglocs, regalloc, fcond):
-    raise NotImplementedError, op
+    raise NotImplementedError(op)
+
+
 def notimplemented_op_with_guard(self, op, guard_op, arglocs, regalloc, fcond):
-    raise NotImplementedError, op
+    raise NotImplementedError(op)
 
 asm_operations = [notimplemented_op] * (rop._LAST + 1)
 asm_operations_with_guard = [notimplemented_op_with_guard] * (rop._LAST + 1)
@@ -1209,4 +1234,3 @@
     if hasattr(AssemblerARM, methname):
         func = getattr(AssemblerARM, methname).im_func
         asm_operations_with_guard[value] = func
-
diff --git a/pypy/jit/backend/arm/codebuilder.py b/pypy/jit/backend/arm/codebuilder.py
--- a/pypy/jit/backend/arm/codebuilder.py
+++ b/pypy/jit/backend/arm/codebuilder.py
@@ -4,13 +4,9 @@
 from pypy.jit.backend.arm.arch import (WORD, FUNC_ALIGN)
 from pypy.jit.backend.arm.instruction_builder import define_instructions
 from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin
-from pypy.jit.metainterp.history import ConstInt, BoxInt, AbstractFailDescr
 from pypy.rlib.objectmodel import we_are_translated
-from pypy.rlib.rmmap import alloc, PTR
-from pypy.rpython.annlowlevel import llhelper
 from pypy.rpython.lltypesystem import lltype, rffi, llmemory
 from pypy.tool.udir import udir
-from pypy.translator.tool.cbuild import ExternalCompilationInfo
 
 clear_cache = rffi.llexternal(
     "__clear_cache",
@@ -19,9 +15,10 @@
     _nowrapper=True,
     sandboxsafe=True)
 
+
 def binary_helper_call(name):
-    signature = getattr(arch, 'arm_%s_sign' % name)
     function = getattr(arch, 'arm_%s' % name)
+
     def f(self, c=cond.AL):
         """Generates a call to a helper function, takes its
         arguments in r0 and r1, result is placed in r0"""
@@ -31,9 +28,10 @@
         else:
             self.PUSH(range(2, 4), cond=c)
             self.BL(addr, c)
-            self.POP(range(2,4), cond=c)
+            self.POP(range(2, 4), cond=c)
     return f
 
+
 class AbstractARMv7Builder(object):
 
     def __init__(self):
@@ -42,6 +40,7 @@
     def align(self):
         while(self.currpos() % FUNC_ALIGN != 0):
             self.writechar(chr(0))
+
     def NOP(self):
         self.MOV_rr(0, 0)
 
@@ -79,7 +78,7 @@
                 | 0xB << 8
                 | nregs)
         self.write32(instr)
-    
+
     def VMOV_rc(self, rt, rt2, dm, cond=cond.AL):
         """This instruction copies two words from two ARM core registers into a
         doubleword extension register, or from a doubleword extension register
@@ -116,7 +115,7 @@
         self.write32(instr)
 
     def VMOV_cc(self, dd, dm, cond=cond.AL):
-        sz = 1 # for 64-bit mode
+        sz = 1  # for 64-bit mode
         instr = (cond << 28
                 | 0xEB << 20
                 | (dd & 0xF) << 12
@@ -163,10 +162,8 @@
         self.write32(cond << 28 | 0xEF1FA10)
 
     def B(self, target, c=cond.AL):
-        #assert self._fits_in_24bits(target)
-        #return (c << 20 | 0xA << 24 | target & 0xFFFFFF)
         if c == cond.AL:
-            self.LDR_ri(reg.pc.value, reg.pc.value, -arch.PC_OFFSET/2)
+            self.LDR_ri(reg.pc.value, reg.pc.value, -arch.PC_OFFSET / 2)
             self.write32(target)
         else:
             self.gen_load_int(reg.ip.value, target, cond=c)
@@ -180,8 +177,8 @@
 
     def BL(self, target, c=cond.AL):
         if c == cond.AL:
-            self.ADD_ri(reg.lr.value, reg.pc.value, arch.PC_OFFSET/2)
-            self.LDR_ri(reg.pc.value, reg.pc.value, imm=-arch.PC_OFFSET/2)
+            self.ADD_ri(reg.lr.value, reg.pc.value, arch.PC_OFFSET / 2)
+            self.LDR_ri(reg.pc.value, reg.pc.value, imm=-arch.PC_OFFSET / 2)
             self.write32(target)
         else:
             self.gen_load_int(reg.ip.value, target, cond=c)
@@ -235,7 +232,6 @@
     def currpos(self):
         raise NotImplementedError
 
-    size_of_gen_load_int = 2 * WORD
     def gen_load_int(self, r, value, cond=cond.AL):
         """r is the register number, value is the value to be loaded to the
         register"""
@@ -244,6 +240,8 @@
         self.MOVW_ri(r, bottom, cond)
         if top:
             self.MOVT_ri(r, top, cond)
+    size_of_gen_load_int = 2 * WORD
+
 
 class OverwritingBuilder(AbstractARMv7Builder):
     def __init__(self, cb, start, size):
@@ -260,6 +258,7 @@
         self.cb.overwrite(self.index, char)
         self.index += 1
 
+
 class ARMv7Builder(BlockBuilderMixin, AbstractARMv7Builder):
     def __init__(self):
         AbstractARMv7Builder.__init__(self)
@@ -279,7 +278,7 @@
     # XXX remove and setup aligning in llsupport
     def materialize(self, asmmemmgr, allblocks, gcrootmap=None):
         size = self.get_relative_pos()
-        malloced = asmmemmgr.malloc(size, size+7)
+        malloced = asmmemmgr.malloc(size, size + 7)
         allblocks.append(malloced)
         rawstart = malloced[0]
         while(rawstart % FUNC_ALIGN != 0):
@@ -294,7 +293,8 @@
     def clear_cache(self, addr):
         if we_are_translated():
             startaddr = rffi.cast(llmemory.Address, addr)
-            endaddr = rffi.cast(llmemory.Address, addr + self.get_relative_pos())
+            endaddr = rffi.cast(llmemory.Address,
+                            addr + self.get_relative_pos())
             clear_cache(startaddr, endaddr)
 
     def copy_to_raw_memory(self, addr):
diff --git a/pypy/jit/backend/arm/conditions.py b/pypy/jit/backend/arm/conditions.py
--- a/pypy/jit/backend/arm/conditions.py
+++ b/pypy/jit/backend/arm/conditions.py
@@ -15,10 +15,12 @@
 AL = 0xE
 
 opposites = [NE, EQ, CC, CS, PL, MI, VC, VS, LS, HI, LT, GE, LE, GT, AL]
+
+
 def get_opposite_of(operation):
     return opposites[operation]
 
-# see mapping for floating poin according to 
+# see mapping for floating point according to
 # http://blogs.arm.com/software-enablement/405-condition-codes-4-floating-point-comparisons-using-vfp/
 VFP_LT = CC
 VFP_LE = LS
diff --git a/pypy/jit/backend/arm/instruction_builder.py b/pypy/jit/backend/arm/instruction_builder.py
--- a/pypy/jit/backend/arm/instruction_builder.py
+++ b/pypy/jit/backend/arm/instruction_builder.py
@@ -1,5 +1,7 @@
 from pypy.jit.backend.arm import conditions as cond
 from pypy.jit.backend.arm import instructions
+
+
 # move table lookup out of generated functions
 def define_load_store_func(name, table):
     n = (0x1 << 26
@@ -13,6 +15,7 @@
     rncond = ('rn' in table and table['rn'] == '!0xF')
     if table['imm']:
         assert not b_zero
+
         def f(self, rt, rn, imm=0, cond=cond.AL):
             assert not (rncond and rn == 0xF)
             p = 1
@@ -20,7 +23,7 @@
             u, imm = self._encode_imm(imm)
             instr = (n
                     | cond << 28
-                    | (p & 0x1) <<  24
+                    | (p & 0x1) << 24
                     | (u & 0x1) << 23
                     | (w & 0x1) << 21
                     | imm_operation(rt, rn, imm))
@@ -34,7 +37,7 @@
             u, imm = self._encode_imm(imm)
             instr = (n
                     | cond << 28
-                    | (p & 0x1) <<  24
+                    | (p & 0x1) << 24
                     | (u & 0x1) << 23
                     | (w & 0x1) << 21
                     | reg_operation(rt, rn, rm, imm, s, shifttype))
@@ -44,6 +47,7 @@
             self.write32(instr)
     return f
 
+
 def define_extra_load_store_func(name, table):
     def check_registers(r1, r2):
         assert r1 % 2 == 0
@@ -57,7 +61,7 @@
     p = 1
     w = 0
     rncond = ('rn' in table and table['rn'] == '!0xF')
-    dual =  (name[-4] == 'D')
+    dual = (name[-4] == 'D')
 
     if dual:
         if name[-2:] == 'rr':
@@ -114,6 +118,7 @@
                         | (imm & 0xF))
     return f
 
+
 def define_data_proc_imm_func(name, table):
     n = (0x1 << 25
         | (table['op'] & 0x1F) << 20)
@@ -139,6 +144,7 @@
                 | imm_operation(0, rn, imm))
     return imm_func
 
+
 def define_data_proc_func(name, table):
     n = ((table['op1'] & 0x1F) << 20
         | (table['op2'] & 0x1F) << 7
@@ -175,6 +181,7 @@
                         | reg_operation(rd, rn, rm, imm, s, shifttype))
     return f
 
+
 def define_data_proc_reg_shift_reg_func(name, table):
     n = ((0x1 << 4) | (table['op1'] & 0x1F) << 20 | (table['op2'] & 0x3) << 5)
     if 'result' in table and not table['result']:
@@ -211,8 +218,10 @@
                         | (rn & 0xF))
     return f
 
+
 def define_supervisor_and_coproc_func(name, table):
     n = (0x3 << 26 | (table['op1'] & 0x3F) << 20 | (table['op'] & 0x1) << 4)
+
     def f(self, coproc, opc1, rt, crn, crm, opc2=0, cond=cond.AL):
         self.write32(n
                     | cond << 28
@@ -224,6 +233,7 @@
                     | (crm & 0xF))
     return f
 
+
 def define_multiply_func(name, table):
     n = (table['op'] & 0xF) << 20 | 0x9 << 4
     if 'acc' in table and table['acc']:
@@ -246,14 +256,14 @@
                             | (rn & 0xF))
 
     elif 'long' in table and table['long']:
-       def f(self, rdlo, rdhi, rn, rm, cond=cond.AL):
+        def f(self, rdlo, rdhi, rn, rm, cond=cond.AL):
             assert rdhi != rdlo
             self.write32(n
-                        | cond << 28
-                        | (rdhi & 0xF) << 16
-                        | (rdlo & 0xF) << 12
-                        | (rm & 0xF) << 8
-                        | (rn & 0xF))
+                    | cond << 28
+                    | (rdhi & 0xF) << 16
+                    | (rdlo & 0xF) << 12
+                    | (rm & 0xF) << 8
+                    | (rn & 0xF))
     else:
         def f(self, rd, rn, rm, cond=cond.AL, s=0):
             self.write32(n
@@ -265,8 +275,10 @@
 
     return f
 
+
 def define_block_data_func(name, table):
     n = (table['op'] & 0x3F) << 20
+
     def f(self, rn, regs, w=0, cond=cond.AL):
         # no R bit for now at bit 15
         instr = (n
@@ -278,6 +290,8 @@
         self.write32(instr)
 
     return f
+
+
 def define_float_load_store_func(name, table):
     n = (0x3 << 26
         | (table['opcode'] & 0x1F) << 20
@@ -288,9 +302,9 @@
     # the value actually encoded is imm / 4
     def f(self, dd, rn, imm=0, cond=cond.AL):
         assert imm % 4 == 0
-        imm = imm/4
+        imm = imm / 4
         u, imm = self._encode_imm(imm)
-        instr = ( n
+        instr = (n
                 | (cond & 0xF) << 28
                 | (u & 0x1) << 23
                 | (rn & 0xF) << 16
@@ -299,10 +313,11 @@
         self.write32(instr)
     return f
 
+
 def define_float64_data_proc_instructions_func(name, table):
     n = (0xE << 24
         | 0x5 << 9
-        | 0x1 << 8 # 64 bit flag
+        | 0x1 << 8  # 64 bit flag
         | (table['opc3'] & 0x3) << 6)
 
     if 'opc1' in table:
@@ -335,11 +350,13 @@
             self.write32(instr)
     return f
 
+
 def imm_operation(rt, rn, imm):
     return ((rn & 0xFF) << 16
     | (rt & 0xFF) << 12
     | (imm & 0xFFF))
 
+
 def reg_operation(rt, rn, rm, imm, s, shifttype):
     return ((s & 0x1) << 20
             | (rn & 0xF) << 16
@@ -348,10 +365,12 @@
             | (shifttype & 0x3) << 5
             | (rm & 0xF))
 
+
 def define_instruction(builder, key, val, target):
     f = builder(key, val)
     setattr(target, key, f)
 
+
 def define_instructions(target):
     inss = [k for k in instructions.__dict__.keys() if not k.startswith('__')]
     for name in inss:
diff --git a/pypy/jit/backend/arm/instructions.py b/pypy/jit/backend/arm/instructions.py
--- a/pypy/jit/backend/arm/instructions.py
+++ b/pypy/jit/backend/arm/instructions.py
@@ -1,93 +1,94 @@
 load_store = {
-    'STR_ri': {'A':0, 'op1': 0x0, 'op1not': 0x2, 'imm': True},
-    'STR_rr': {'A':1, 'op1': 0x0, 'op1not': 0x2, 'B': 0, 'imm': False},
-    'LDR_ri': {'A':0, 'op1': 0x1, 'op1not': 0x3, 'imm': True},
-    'LDR_rr': {'A':1, 'op1': 0x1, 'op1not': 0x3, 'B': 0, 'imm': False},
-    'STRB_ri': {'A':0, 'op1': 0x4, 'op1not': 0x6, 'rn':'!0xF', 'imm': True},
-    'STRB_rr': {'A':1, 'op1': 0x4, 'op1not': 0x6, 'B': 0, 'imm': False},
-    'LDRB_ri': {'A':0, 'op1': 0x5, 'op1not': 0x7, 'rn':'!0xF', 'imm': True},
-    'LDRB_rr': {'A':1, 'op1': 0x5, 'op1not': 0x7, 'B': 0, 'imm': False},
+    'STR_ri': {'A': 0, 'op1': 0x0, 'op1not': 0x2, 'imm': True},
+    'STR_rr': {'A': 1, 'op1': 0x0, 'op1not': 0x2, 'B': 0, 'imm': False},
+    'LDR_ri': {'A': 0, 'op1': 0x1, 'op1not': 0x3, 'imm': True},
+    'LDR_rr': {'A': 1, 'op1': 0x1, 'op1not': 0x3, 'B': 0, 'imm': False},
+    'STRB_ri': {'A': 0, 'op1': 0x4, 'op1not': 0x6, 'rn': '!0xF', 'imm': True},
+    'STRB_rr': {'A': 1, 'op1': 0x4, 'op1not': 0x6, 'B': 0, 'imm': False},
+    'LDRB_ri': {'A': 0, 'op1': 0x5, 'op1not': 0x7, 'rn': '!0xF', 'imm': True},
+    'LDRB_rr': {'A': 1, 'op1': 0x5, 'op1not': 0x7, 'B': 0, 'imm': False},
 }
-extra_load_store = { #Section 5.2.8
+extra_load_store = {  # Section 5.2.8
     'STRH_rr':  {'op2': 0x1, 'op1': 0x0},
     'LDRH_rr':  {'op2': 0x1, 'op1': 0x1},
     'STRH_ri':  {'op2': 0x1, 'op1': 0x4},
-    'LDRH_ri':  {'op2': 0x1, 'op1': 0x5, 'rn':'!0xF'},
+    'LDRH_ri':  {'op2': 0x1, 'op1': 0x5, 'rn': '!0xF'},
     'LDRD_rr':  {'op2': 0x2, 'op1': 0x0},
     'LDRSB_rr': {'op2': 0x2, 'op1': 0x1},
     'LDRD_ri':  {'op2': 0x2, 'op1': 0x4},
-    'LDRSB_ri': {'op2': 0x2, 'op1': 0x5, 'rn':'!0xF'},
+    'LDRSB_ri': {'op2': 0x2, 'op1': 0x5, 'rn': '!0xF'},
     'STRD_rr':  {'op2': 0x3, 'op1': 0x0},
     'LDRSH_rr': {'op2': 0x3, 'op1': 0x1},
     'STRD_ri':  {'op2': 0x3, 'op1': 0x4},
-    'LDRSH_ri': {'op2': 0x3, 'op1': 0x5, 'rn':'!0xF'},
+    'LDRSH_ri': {'op2': 0x3, 'op1': 0x5, 'rn': '!0xF'},
 }
 
 
 data_proc = {
-    'AND_rr': {'op1':0x0, 'op2':0, 'op3':0, 'result':True, 'base':True},
-    'EOR_rr': {'op1':0x2, 'op2':0, 'op3':0, 'result':True, 'base':True},
-    'SUB_rr': {'op1':0x4, 'op2':0, 'op3':0, 'result':True, 'base':True},
-    'RSB_rr': {'op1':0x6, 'op2':0, 'op3':0, 'result':True, 'base':True},
-    'ADD_rr': {'op1':0x8, 'op2':0, 'op3':0, 'result':True, 'base':True},
-    'ADC_rr': {'op1':0xA, 'op2':0, 'op3':0, 'result':True, 'base':True},
-    'SBC_rr': {'op1':0xC, 'op2':0, 'op3':0, 'result':True, 'base':True},
-    'RSC_rr': {'op1':0xE, 'op2':0, 'op3':0, 'result':True, 'base':True},
-    'TST_rr': {'op1':0x11, 'op2':0, 'op3':0, 'result':False, 'base':True},
-    'TEQ_rr': {'op1':0x13, 'op2':0, 'op3':0, 'result':False, 'base':True},
-    'CMP_rr': {'op1':0x15, 'op2':0, 'op3':0, 'result':False, 'base':True},
-    'CMN_rr': {'op1':0x17, 'op2':0, 'op3':0, 'result':False, 'base':True},
-    'ORR_rr': {'op1':0x18, 'op2':0, 'op3':0, 'result':True, 'base':True},
-    'MOV_rr': {'op1':0x1A, 'op2':0, 'op3':0, 'result':True, 'base':False},
-    'LSL_ri': {'op1':0x1A, 'op2':0x0, 'op3':0, 'op2cond':'!0', 'result':False, 'base':True},
-    'LSR_ri': {'op1':0x1A, 'op2':0, 'op3':0x1, 'op2cond':'', 'result':False, 'base':True},
-    'ASR_ri': {'op1':0x1A, 'op2':0, 'op3':0x2, 'op2cond':'', 'result':False, 'base':True},
-    #'RRX_ri': {'op1':0x1A, 'op2':0, 'op3':0x3, 'op2cond':'0', 'result':False, 'base':True},
-    'ROR_ri': {'op1':0x1A, 'op2':0x0, 'op3':0x3, 'op2cond':'!0', 'result':True, 'base':False},
-    #BIC
-    'MVN_rr': {'op1':0x1E, 'op2':0x0, 'op3':0x0, 'result':True, 'base':False},
+    'AND_rr': {'op1': 0x0, 'op2': 0, 'op3': 0, 'result': True, 'base': True},
+    'EOR_rr': {'op1': 0x2, 'op2': 0, 'op3': 0, 'result': True, 'base': True},
+    'SUB_rr': {'op1': 0x4, 'op2': 0, 'op3': 0, 'result': True, 'base': True},
+    'RSB_rr': {'op1': 0x6, 'op2': 0, 'op3': 0, 'result': True, 'base': True},
+    'ADD_rr': {'op1': 0x8, 'op2': 0, 'op3': 0, 'result': True, 'base': True},
+    'ADC_rr': {'op1': 0xA, 'op2': 0, 'op3': 0, 'result': True, 'base': True},
+    'SBC_rr': {'op1': 0xC, 'op2': 0, 'op3': 0, 'result': True, 'base': True},
+    'RSC_rr': {'op1': 0xE, 'op2': 0, 'op3': 0, 'result': True, 'base': True},
+    'TST_rr': {'op1': 0x11, 'op2': 0, 'op3': 0, 'result': False, 'base': True},
+    'TEQ_rr': {'op1': 0x13, 'op2': 0, 'op3': 0, 'result': False, 'base': True},
+    'CMP_rr': {'op1': 0x15, 'op2': 0, 'op3': 0, 'result': False, 'base': True},
+    'CMN_rr': {'op1': 0x17, 'op2': 0, 'op3': 0, 'result': False, 'base': True},
+    'ORR_rr': {'op1': 0x18, 'op2': 0, 'op3': 0, 'result': True, 'base': True},
+    'MOV_rr': {'op1': 0x1A, 'op2': 0, 'op3': 0, 'result': True, 'base': False},
+    'LSL_ri': {'op1': 0x1A, 'op2': 0x0, 'op3': 0, 'op2cond': '!0',
+                                                'result': False, 'base': True},
+    'LSR_ri': {'op1': 0x1A, 'op2': 0, 'op3': 0x1, 'op2cond': '',
+                                                'result': False, 'base': True},
+    'ASR_ri': {'op1': 0x1A, 'op2': 0, 'op3': 0x2, 'op2cond': '',
+                                                'result': False, 'base': True},
+    'ROR_ri': {'op1': 0x1A, 'op2': 0x0, 'op3': 0x3, 'op2cond': '!0',
+                                                'result': True, 'base': False},
+    'MVN_rr': {'op1': 0x1E, 'op2': 0x0, 'op3': 0x0, 'result': True,
+                                                                'base': False},
 
 }
 
 data_proc_reg_shift_reg = {
-    'AND_rr_sr': {'op1':0x0,  'op2':0},
-    'EOR_rr_sr': {'op1':0x2,  'op2':0},
-    'SUB_rr_sr': {'op1':0x4,  'op2':0},
-    'RSB_rr_sr': {'op1':0x6,  'op2':0},
-    'ADD_rr_sr': {'op1':0x8,  'op2':0},
-    'ADC_rr_sr': {'op1':0xA,  'op2':0},
-    'SBC_rr_sr': {'op1':0xC,  'op2':0},
-    'RSC_rr_sr': {'op1':0xE,  'op2':0},
-    'TST_rr_sr': {'op1':0x11, 'op2':0, 'result': False},
-    'TEQ_rr_sr': {'op1':0x13, 'op2':0, 'result': False},
-    'CMP_rr_sr': {'op1':0x15, 'op2':0, 'result': False},
-    'CMN_rr_sr': {'op1':0x17, 'op2':0, 'result': False},
-    'ORR_rr_sr': {'op1':0x18, 'op2':0},
-    'LSL_rr': {'op1':0x1A, 'op2':0, },
-    'LSR_rr': {'op1':0x1A, 'op2':0x1},
-    'ASR_rr': {'op1':0x1A, 'op2':0x2},
-    #'RRX_rr': {'op1':0x1A, 'op2':0,},
-    'ROR_rr': {'op1':0x1A, 'op2':0x3},
-    # BIC, MVN
+    'AND_rr_sr': {'op1': 0x0,  'op2': 0},
+    'EOR_rr_sr': {'op1': 0x2,  'op2': 0},
+    'SUB_rr_sr': {'op1': 0x4,  'op2': 0},
+    'RSB_rr_sr': {'op1': 0x6,  'op2': 0},
+    'ADD_rr_sr': {'op1': 0x8,  'op2': 0},
+    'ADC_rr_sr': {'op1': 0xA,  'op2': 0},
+    'SBC_rr_sr': {'op1': 0xC,  'op2': 0},
+    'RSC_rr_sr': {'op1': 0xE,  'op2': 0},
+    'TST_rr_sr': {'op1': 0x11, 'op2': 0, 'result': False},
+    'TEQ_rr_sr': {'op1': 0x13, 'op2': 0, 'result': False},
+    'CMP_rr_sr': {'op1': 0x15, 'op2': 0, 'result': False},
+    'CMN_rr_sr': {'op1': 0x17, 'op2': 0, 'result': False},
+    'ORR_rr_sr': {'op1': 0x18, 'op2': 0},
+    'LSL_rr': {'op1': 0x1A, 'op2': 0, },
+    'LSR_rr': {'op1': 0x1A, 'op2': 0x1},
+    'ASR_rr': {'op1': 0x1A, 'op2': 0x2},
+    'ROR_rr': {'op1': 0x1A, 'op2': 0x3},
 }
 
 data_proc_imm = {
-    'AND_ri': {'op': 0, 'result':True, 'base':True},
-    'EOR_ri': {'op': 0x2, 'result':True, 'base':True},
-    'SUB_ri': {'op': 0x4, 'result':True, 'base':True},
-    'RSB_ri': {'op': 0x6, 'result':True, 'base':True},
-    'ADD_ri': {'op': 0x8, 'result':True, 'base':True},
-    'ADC_ri': {'op': 0xA, 'result':True, 'base':True},
-    'SBC_ri': {'op': 0xC, 'result':True, 'base':True},
-    'RSC_ri': {'op': 0xE, 'result':True, 'base':True},
-    'TST_ri': {'op': 0x11, 'result':False, 'base':True},
-    'TEQ_ri': {'op': 0x13, 'result':False, 'base':True},
-    'CMP_ri': {'op': 0x15, 'result':False, 'base':True},
-    'CMN_ri': {'op': 0x17, 'result':False, 'base':True},
-    'ORR_ri': {'op': 0x18, 'result':True, 'base':True},
-    'MOV_ri': {'op': 0x1A, 'result':True, 'base':False},
-    'BIC_ri': {'op': 0x1C, 'result':True, 'base':True},
-    'MVN_ri': {'op': 0x1E, 'result':True, 'base':False},
+    'AND_ri': {'op': 0, 'result': True, 'base': True},
+    'EOR_ri': {'op': 0x2, 'result': True, 'base': True},
+    'SUB_ri': {'op': 0x4, 'result': True, 'base': True},
+    'RSB_ri': {'op': 0x6, 'result': True, 'base': True},
+    'ADD_ri': {'op': 0x8, 'result': True, 'base': True},
+    'ADC_ri': {'op': 0xA, 'result': True, 'base': True},
+    'SBC_ri': {'op': 0xC, 'result': True, 'base': True},
+    'RSC_ri': {'op': 0xE, 'result': True, 'base': True},
+    'TST_ri': {'op': 0x11, 'result': False, 'base': True},
+    'TEQ_ri': {'op': 0x13, 'result': False, 'base': True},
+    'CMP_ri': {'op': 0x15, 'result': False, 'base': True},
+    'CMN_ri': {'op': 0x17, 'result': False, 'base': True},
+    'ORR_ri': {'op': 0x18, 'result': True, 'base': True},
+    'MOV_ri': {'op': 0x1A, 'result': True, 'base': False},
+    'BIC_ri': {'op': 0x1C, 'result': True, 'base': True},
+    'MVN_ri': {'op': 0x1E, 'result': True, 'base': False},
 }
 
 supervisor_and_coproc = {
diff --git a/pypy/jit/backend/arm/jump.py b/pypy/jit/backend/arm/jump.py
--- a/pypy/jit/backend/arm/jump.py
+++ b/pypy/jit/backend/arm/jump.py
@@ -1,7 +1,7 @@
 # ../x86/jump.py
 # XXX combine with ../x86/jump.py and move to llsupport
 import sys
-from pypy.tool.pairtype import extendabletype
+
 
 def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg):
     pending_dests = len(dst_locations)
@@ -65,12 +65,14 @@
                     assembler.regalloc_pop(dst)
             assert pending_dests == 0
 
+
 def _move(assembler, src, dst, tmpreg):
     if dst.is_stack() and src.is_stack():
         assembler.regalloc_mov(src, tmpreg)
         src = tmpreg
     assembler.regalloc_mov(src, dst)
 
+
 def remap_frame_layout_mixed(assembler,
                              src_locations1, dst_locations1, tmpreg1,
                              src_locations2, dst_locations2, tmpreg2):
@@ -84,7 +86,7 @@
     src_locations2red = []
     dst_locations2red = []
     for i in range(len(src_locations2)):
-        loc    = src_locations2[i]
+        loc = src_locations2[i]
         dstloc = dst_locations2[i]
         if loc.is_stack():
             key = loc.as_key()
diff --git a/pypy/jit/backend/arm/locations.py b/pypy/jit/backend/arm/locations.py
--- a/pypy/jit/backend/arm/locations.py
+++ b/pypy/jit/backend/arm/locations.py
@@ -1,5 +1,7 @@
-from pypy.jit.metainterp.history import INT, FLOAT, REF
+from pypy.jit.metainterp.history import INT, FLOAT
 from pypy.jit.backend.arm.arch import WORD
+
+
 class AssemblerLocation(object):
     _immutable_ = True
     type = INT
@@ -22,6 +24,7 @@
     def as_key(self):
         raise NotImplementedError
 
+
 class RegisterLocation(AssemblerLocation):
     _immutable_ = True
     width = WORD
@@ -38,13 +41,15 @@
     def as_key(self):
         return self.value
 
+
 class VFPRegisterLocation(RegisterLocation):
     _immutable_ = True
-    type = FLOAT 
-    width = 2*WORD
+    type = FLOAT
+    width = 2 * WORD
 
     def get_single_precision_regs(self):
-        return [VFPRegisterLocation(i) for i in [self.value*2, self.value*2+1]]
+        return [VFPRegisterLocation(i) for i in
+                        [self.value * 2, self.value * 2 + 1]]
 
     def __repr__(self):
         return 'vfp%d' % self.value
@@ -58,11 +63,11 @@
     def as_key(self):
         return self.value + 20
 
+
 class ImmLocation(AssemblerLocation):
     _immutable_ = True
     width = WORD
 
-
     def __init__(self, value):
         self.value = value
 
@@ -78,11 +83,12 @@
     def as_key(self):
         return self.value + 40
 
+
 class ConstFloatLoc(AssemblerLocation):
     """This class represents an imm float value which is stored in memory at
     the address stored in the field value"""
     _immutable_ = True
-    width = 2*WORD
+    width = 2 * WORD
     type = FLOAT
 
     def __init__(self, value):
@@ -100,6 +106,7 @@
     def as_key(self):
         return -1 * self.value
 
+
 class StackLocation(AssemblerLocation):
     _immutable_ = True
 
@@ -123,5 +130,6 @@
     def as_key(self):
         return -self.position
 
+
 def imm(i):
     return ImmLocation(i)
diff --git a/pypy/jit/backend/arm/opassembler.py b/pypy/jit/backend/arm/opassembler.py
--- a/pypy/jit/backend/arm/opassembler.py
+++ b/pypy/jit/backend/arm/opassembler.py
@@ -1,39 +1,35 @@
 from __future__ import with_statement
 from pypy.jit.backend.arm import conditions as c
-from pypy.jit.backend.arm import locations
 from pypy.jit.backend.arm import registers as r
 from pypy.jit.backend.arm import shift
 from pypy.jit.backend.arm.arch import WORD, PC_OFFSET
 
 from pypy.jit.backend.arm.helper.assembler import (gen_emit_op_by_helper_call,
-                                                    gen_emit_op_unary_cmp,
-                                                    gen_emit_guard_unary_cmp,
-                                                    gen_emit_op_ri,
-                                                    gen_emit_cmp_op,
-                                                    gen_emit_cmp_op_guard,
-                                                    gen_emit_float_op,
-                                                    gen_emit_float_cmp_op,
-                                                    gen_emit_float_cmp_op_guard,
-                                                    gen_emit_unary_float_op, 
-                                                    saved_registers,
-                                                    count_reg_args)
+                                                gen_emit_op_unary_cmp,
+                                                gen_emit_guard_unary_cmp,
+                                                gen_emit_op_ri,
+                                                gen_emit_cmp_op,
+                                                gen_emit_cmp_op_guard,
+                                                gen_emit_float_op,
+                                                gen_emit_float_cmp_op,
+                                                gen_emit_float_cmp_op_guard,
+                                                gen_emit_unary_float_op,
+                                                saved_registers,
+                                                count_reg_args)
 from pypy.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder
 from pypy.jit.backend.arm.jump import remap_frame_layout
-from pypy.jit.backend.arm.regalloc import Regalloc, TempInt, TempPtr
+from pypy.jit.backend.arm.regalloc import TempInt, TempPtr
 from pypy.jit.backend.arm.locations import imm
 from pypy.jit.backend.llsupport import symbolic
-from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr
-from pypy.jit.backend.llsupport.regalloc import compute_vars_longevity
-from pypy.jit.metainterp.history import (Const, ConstInt, BoxInt, Box,
-                                        AbstractFailDescr, LoopToken, INT, FLOAT, REF)
+from pypy.jit.metainterp.history import (Box, AbstractFailDescr,
+                                            LoopToken, INT, FLOAT, REF)
 from pypy.jit.metainterp.resoperation import rop
-from pypy.rlib import rgc
 from pypy.rlib.objectmodel import we_are_translated
-from pypy.rpython.annlowlevel import llhelper
-from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory
+from pypy.rpython.lltypesystem import lltype, rffi, rstr
 
 NO_FORCE_INDEX = -1
 
+
 class GuardToken(object):
     def __init__(self, descr, failargs, faillocs, offset,
                             save_exc, fcond=c.AL, is_invalidate=False):
@@ -44,7 +40,8 @@
         self.failargs = failargs
         self.faillocs = faillocs
         self.save_exc = save_exc
-        self.fcond=fcond
+        self.fcond = fcond
+
 
 class IntOpAsslember(object):
 
@@ -94,10 +91,12 @@
     def emit_guard_int_mul_ovf(self, op, guard, arglocs, regalloc, fcond):
         reg1 = arglocs[0]
         reg2 = arglocs[1]
-        res =  arglocs[2]
+        res = arglocs[2]
         failargs = arglocs[3:]
-        self.mc.SMULL(res.value, r.ip.value, reg1.value, reg2.value, cond=fcond)
-        self.mc.CMP_rr(r.ip.value, res.value, shifttype=shift.ASR, imm=31, cond=fcond)
+        self.mc.SMULL(res.value, r.ip.value, reg1.value, reg2.value,
+                                                                cond=fcond)
+        self.mc.CMP_rr(r.ip.value, res.value, shifttype=shift.ASR,
+                                                        imm=31, cond=fcond)
 
         if guard.getopnum() == rop.GUARD_OVERFLOW:
             fcond = self._emit_guard(guard, failargs, c.NE, save_exc=False)
@@ -111,7 +110,7 @@
         self.emit_op_int_add(op, arglocs[0:3], regalloc, fcond, flags=True)
         self._emit_guard_overflow(guard, arglocs[3:], fcond)
         return fcond
-    
+
     def emit_guard_int_sub_ovf(self, op, guard, arglocs, regalloc, fcond):
         self.emit_op_int_sub(op, arglocs[0:3], regalloc, fcond, flags=True)
         self._emit_guard_overflow(guard, arglocs[3:], fcond)
@@ -135,8 +134,6 @@
     emit_op_int_gt = gen_emit_cmp_op('int_gt', c.GT)
     emit_op_int_ge = gen_emit_cmp_op('int_ge', c.GE)
 
-
-
     emit_guard_int_lt = gen_emit_cmp_op_guard('int_lt', c.LT)
     emit_guard_int_le = gen_emit_cmp_op_guard('int_le', c.LE)
     emit_guard_int_eq = gen_emit_cmp_op_guard('int_eq', c.EQ)
@@ -163,7 +160,6 @@
     emit_op_int_sub_ovf = emit_op_int_sub
 
 
-
 class UnaryIntOpAssembler(object):
 
     _mixin_ = True
@@ -185,19 +181,20 @@
         self.mc.RSB_ri(resloc.value, l0.value, imm=0)
         return fcond
 
+
 class GuardOpAssembler(object):
 
     _mixin_ = True
 
-    def _emit_guard(self, op, arglocs, fcond, save_exc, is_guard_not_invalidated=False):
+    def _emit_guard(self, op, arglocs, fcond, save_exc,
+                                    is_guard_not_invalidated=False):
         assert isinstance(save_exc, bool)
         assert isinstance(fcond, int)
         descr = op.getdescr()
         assert isinstance(descr, AbstractFailDescr)
 
-
         if not we_are_translated() and hasattr(op, 'getfailargs'):
-           print 'Failargs: ', op.getfailargs()
+            print 'Failargs: ', op.getfailargs()
 
         pos = self.mc.currpos()
         # For all guards that are not GUARD_NOT_INVALIDATED we emit a
@@ -283,7 +280,8 @@
         return fcond
 
     def emit_op_guard_not_invalidated(self, op, locs, regalloc, fcond):
-        return self._emit_guard(op, locs, fcond, save_exc=False, is_guard_not_invalidated=True)
+        return self._emit_guard(op, locs, fcond, save_exc=False,
+                                            is_guard_not_invalidated=True)
 
     def _cmp_guard_class(self, op, locs, regalloc, fcond):
         offset = locs[2]
@@ -316,12 +314,13 @@
         else:
             target = descr._arm_bootstrap_code + descr._arm_loop_code
             self.mc.B(target, fcond)
-            new_fd = max(regalloc.frame_manager.frame_depth, descr._arm_frame_depth)
+            new_fd = max(regalloc.frame_manager.frame_depth,
+                                                descr._arm_frame_depth)
             regalloc.frame_manager.frame_depth = new_fd
         return fcond
 
     def emit_op_finish(self, op, arglocs, regalloc, fcond):
-        for i in range(len(arglocs) -1):
+        for i in range(len(arglocs) - 1):
             loc = arglocs[i]
             box = op.getarg(i)
             if loc is None:
@@ -367,16 +366,18 @@
         self.gen_func_epilog()
         return fcond
 
-    def emit_op_call(self, op, args, regalloc, fcond, force_index=NO_FORCE_INDEX):
+    def emit_op_call(self, op, args, regalloc, fcond,
+                                force_index=NO_FORCE_INDEX):
         adr = args[0].value
         arglist = op.getarglist()[1:]
         if force_index == NO_FORCE_INDEX:
             force_index = self.write_new_force_index()
-        cond =  self._emit_call(force_index, adr, arglist, 
+        cond = self._emit_call(force_index, adr, arglist,
                                     regalloc, fcond, op.result)
         descr = op.getdescr()
         #XXX Hack, Hack, Hack
-        if op.result and not we_are_translated() and not isinstance(descr, LoopToken):
+        if (op.result and not we_are_translated()
+                        and not isinstance(descr, LoopToken)):
             #XXX check result type
             loc = regalloc.rm.call_result_location(op.result)
             size = descr.get_result_size(False)
@@ -388,11 +389,11 @@
     # emit_op_call_may_force
     # XXX improve freeing of stuff here
     # XXX add an interface that takes locations instead of boxes
-    def _emit_call(self, force_index, adr, args, regalloc, fcond=c.AL, result=None):
+    def _emit_call(self, force_index, adr, args, regalloc, fcond=c.AL,
+                                                            result=None):
         n_args = len(args)
         reg_args = count_reg_args(args)
 
-
         # all arguments past the 4th go on the stack
         n = 0   # used to count the number of words pushed on the stack, so we
                 #can later modify the SP back to its original value
@@ -417,15 +418,15 @@
                 stack_args.append(None)
 
            # then we push everything on the stack
-            for i in range(len(stack_args) -1, -1, -1):
+            for i in range(len(stack_args) - 1, -1, -1):
                 arg = stack_args[i]
                 if arg is None:
                     self.mc.PUSH([r.ip.value])
                 else:
                     self.regalloc_push(regalloc.loc(arg))
 
-        # collect variables that need to go in registers
-        # and the registers they will be stored in 
+        # collect variables that need to go in registers and the registers they
+        # will be stored in
         num = 0
         count = 0
         non_float_locs = []
@@ -457,7 +458,7 @@
         remap_frame_layout(self, non_float_locs, non_float_regs, r.ip)
 
         for loc, reg in float_locs:
-            self.mov_from_vfp_loc(loc, reg, r.all_regs[reg.value+1])
+            self.mov_from_vfp_loc(loc, reg, r.all_regs[reg.value + 1])
 
         #the actual call
         self.mc.BL(adr)
@@ -557,9 +558,8 @@
                 callargs = [r.r0, r.r1, r.r2]
             remap_frame_layout(self, arglocs, callargs, r.ip)
             func = rffi.cast(lltype.Signed, addr)
-            #
-            # misaligned stack in the call, but it's ok because the write barrier
-            # is not going to call anything more.  
+            # misaligned stack in the call, but it's ok because the write
+            # barrier is not going to call anything more.
             self.mc.BL(func)
 
         # patch the JZ above
@@ -570,6 +570,7 @@
 
     emit_op_cond_call_gc_wb_array = emit_op_cond_call_gc_wb
 
+
 class FieldOpAssembler(object):
 
     _mixin_ = True
@@ -652,7 +653,8 @@
     emit_op_getfield_gc_pure = emit_op_getfield_gc
 
     def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond):
-        base_loc, index_loc, res_loc, ofs_loc, ofs, itemsize, fieldsize = arglocs
+        (base_loc, index_loc, res_loc,
+            ofs_loc, ofs, itemsize, fieldsize) = arglocs
         self.mc.gen_load_int(r.ip.value, itemsize.value)
         self.mc.MUL(r.ip.value, index_loc.value, r.ip.value)
         if ofs.value > 0:
@@ -684,7 +686,8 @@
         return fcond
 
     def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond):
-        base_loc, index_loc, value_loc, ofs_loc, ofs, itemsize, fieldsize = arglocs
+        (base_loc, index_loc, value_loc,
+            ofs_loc, ofs, itemsize, fieldsize) = arglocs
         self.mc.gen_load_int(r.ip.value, itemsize.value)
         self.mc.MUL(r.ip.value, index_loc.value, r.ip.value)
         if ofs.value > 0:
@@ -710,8 +713,6 @@
         return fcond
 
 
-
-
 class ArrayOpAssember(object):
 
     _mixin_ = True
@@ -730,7 +731,7 @@
         else:
             scale_loc = ofs_loc
 
-        # add the base offset  
+        # add the base offset
         if ofs.value > 0:
             self.mc.ADD_ri(r.ip.value, scale_loc.value, imm=ofs.value)
             scale_loc = r.ip
@@ -741,11 +742,14 @@
             self.mc.ADD_rr(r.ip.value, base_loc.value, scale_loc.value)
             self.mc.VSTR(value_loc.value, r.ip.value, cond=fcond)
         elif scale.value == 2:
-            self.mc.STR_rr(value_loc.value, base_loc.value, scale_loc.value, cond=fcond)
+            self.mc.STR_rr(value_loc.value, base_loc.value, scale_loc.value,
+                                                                    cond=fcond)
         elif scale.value == 1:
-            self.mc.STRH_rr(value_loc.value, base_loc.value, scale_loc.value, cond=fcond)
+            self.mc.STRH_rr(value_loc.value, base_loc.value, scale_loc.value,
+                                                                    cond=fcond)
         elif scale.value == 0:
-            self.mc.STRB_rr(value_loc.value, base_loc.value, scale_loc.value, cond=fcond)
+            self.mc.STRB_rr(value_loc.value, base_loc.value, scale_loc.value,
+                                                                    cond=fcond)
         else:
             assert 0
         return fcond
@@ -761,7 +765,7 @@
         else:
             scale_loc = ofs_loc
 
-        # add the base offset  
+        # add the base offset
         if ofs.value > 0:
             self.mc.ADD_ri(r.ip.value, scale_loc.value, imm=ofs.value)
             scale_loc = r.ip
@@ -772,18 +776,21 @@
             self.mc.ADD_rr(r.ip.value, base_loc.value, scale_loc.value)
             self.mc.VLDR(res.value, r.ip.value, cond=fcond)
         elif scale.value == 2:
-            self.mc.LDR_rr(res.value, base_loc.value, scale_loc.value, cond=fcond)
+            self.mc.LDR_rr(res.value, base_loc.value, scale_loc.value,
+                                                                cond=fcond)
         elif scale.value == 1:
-            self.mc.LDRH_rr(res.value, base_loc.value, scale_loc.value, cond=fcond)
+            self.mc.LDRH_rr(res.value, base_loc.value, scale_loc.value,
+                                                                cond=fcond)
         elif scale.value == 0:
-            self.mc.LDRB_rr(res.value, base_loc.value, scale_loc.value, cond=fcond)
+            self.mc.LDRB_rr(res.value, base_loc.value, scale_loc.value,
+                                                                cond=fcond)
         else:
             assert 0
 
         #XXX Hack, Hack, Hack
         if not we_are_translated():
             descr = op.getdescr()
-            size =  descr.get_item_size(False)
+            size = descr.get_item_size(False)
             signed = descr.is_item_signed()
             self._ensure_result_bit_extension(res, size, signed)
         return fcond
@@ -807,9 +814,11 @@
     def emit_op_strgetitem(self, op, arglocs, regalloc, fcond):
         res, base_loc, ofs_loc, basesize = arglocs
         if ofs_loc.is_imm():
-            self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), cond=fcond)
+            self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(),
+                                                                    cond=fcond)
         else:
-            self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond)
+            self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value,
+                                                                    cond=fcond)
 
         self.mc.LDRB_ri(res.value, r.ip.value, basesize.value, cond=fcond)
         return fcond
@@ -817,11 +826,14 @@
     def emit_op_strsetitem(self, op, arglocs, regalloc, fcond):
         value_loc, base_loc, ofs_loc, basesize = arglocs
         if ofs_loc.is_imm():
-            self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), cond=fcond)
+            self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(),
+                                                            cond=fcond)
         else:
-            self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond)
+            self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value,
+                                                            cond=fcond)
 
-        self.mc.STRB_ri(value_loc.value, r.ip.value, basesize.value, cond=fcond)
+        self.mc.STRB_ri(value_loc.value, r.ip.value, basesize.value,
+                                                            cond=fcond)
         return fcond
 
     #from ../x86/regalloc.py:928 ff.
@@ -844,18 +856,20 @@
         regalloc.possibly_free_var(args[0])
         regalloc.free_temp_vars()
         if args[3] is not args[2] is not args[4]:  # MESS MESS MESS: don't free
-            regalloc.possibly_free_var(args[2])     # it if ==args[3] or args[4]
+            regalloc.possibly_free_var(args[2])  # it if ==args[3] or args[4]
             regalloc.free_temp_vars()
         srcaddr_box = TempPtr()
         forbidden_vars = [args[1], args[3], args[4], srcaddr_box]
-        srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box, selected_reg=r.r1)
+        srcaddr_loc = regalloc.force_allocate_reg(srcaddr_box,
+                                                        selected_reg=r.r1)
         self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc,
                                         is_unicode=is_unicode)
 
         # compute the destination address
         forbidden_vars = [args[4], args[3], srcaddr_box]
         dstaddr_box = TempPtr()
-        dstaddr_loc = regalloc.force_allocate_reg(dstaddr_box, selected_reg=r.r0)
+        dstaddr_loc = regalloc.force_allocate_reg(dstaddr_box,
+                                                        selected_reg=r.r0)
         forbidden_vars.append(dstaddr_box)
         base_loc = regalloc._ensure_value_is_boxed(args[1], forbidden_vars)
         ofs_loc = regalloc._ensure_value_is_boxed(args[3], forbidden_vars)
@@ -878,35 +892,35 @@
         else:
             length_box = TempInt()
             length_loc = regalloc.force_allocate_reg(length_box,
-                                                forbidden_vars, selected_reg = r.r2)
+                                        forbidden_vars, selected_reg=r.r2)
             imm = regalloc.convert_to_imm(args[4])
             self.load(length_loc, imm)
         if is_unicode:
             bytes_box = TempPtr()
-            bytes_loc = regalloc.force_allocate_reg(bytes_box, forbidden_vars, selected_reg=r.r2)
+            bytes_loc = regalloc.force_allocate_reg(bytes_box,
+                                        forbidden_vars, selected_reg=r.r2)
             scale = self._get_unicode_item_scale()
             assert length_loc.is_reg()
-            self.mc.MOV_ri(r.ip.value, 1<<scale)
+            self.mc.MOV_ri(r.ip.value, 1 << scale)
             self.mc.MUL(bytes_loc.value, r.ip.value, length_loc.value)
             length_box = bytes_box
             length_loc = bytes_loc
         # call memcpy()
-        self._emit_call(NO_FORCE_INDEX, self.memcpy_addr, [dstaddr_box, srcaddr_box, length_box], regalloc)
+        self._emit_call(NO_FORCE_INDEX, self.memcpy_addr,
+                            [dstaddr_box, srcaddr_box, length_box], regalloc)
 
         regalloc.possibly_free_var(length_box)
         regalloc.possibly_free_var(dstaddr_box)
         regalloc.possibly_free_var(srcaddr_box)
 
-
     def _gen_address_inside_string(self, baseloc, ofsloc, resloc, is_unicode):
-        cpu = self.cpu
         if is_unicode:
             ofs_items, _, _ = symbolic.get_array_token(rstr.UNICODE,
-                                                  self.cpu.translate_support_code)
+                                              self.cpu.translate_support_code)
             scale = self._get_unicode_item_scale()
         else:
             ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
-                                                  self.cpu.translate_support_code)
+                                              self.cpu.translate_support_code)
             assert itemsize == 1
             scale = 0
         self._gen_address(ofsloc, ofs_items, scale, resloc, baseloc)
@@ -927,7 +941,7 @@
 
     def _get_unicode_item_scale(self):
         _, itemsize, _ = symbolic.get_array_token(rstr.UNICODE,
-                                                  self.cpu.translate_support_code)
+                                              self.cpu.translate_support_code)
         if itemsize == 4:
             return 2
         elif itemsize == 2:
@@ -935,6 +949,7 @@
         else:
             raise AssertionError("bad unicode item size")
 
+
 class UnicodeOpAssembler(object):
 
     _mixin_ = True
@@ -944,7 +959,7 @@
     def emit_op_unicodegetitem(self, op, arglocs, regalloc, fcond):
         res, base_loc, ofs_loc, scale, basesize, itemsize = arglocs
         self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond,
-                                            imm=scale.value, shifttype=shift.LSL)
+                                        imm=scale.value, shifttype=shift.LSL)
         if scale.value == 2:
             self.mc.LDR_ri(res.value, r.ip.value, basesize.value, cond=fcond)
         elif scale.value == 1:
@@ -958,14 +973,17 @@
         self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond,
                                         imm=scale.value, shifttype=shift.LSL)
         if scale.value == 2:
-            self.mc.STR_ri(value_loc.value, r.ip.value, basesize.value, cond=fcond)
+            self.mc.STR_ri(value_loc.value, r.ip.value, basesize.value,
+                                                                    cond=fcond)
         elif scale.value == 1:
-            self.mc.STRH_ri(value_loc.value, r.ip.value, basesize.value, cond=fcond)
+            self.mc.STRH_ri(value_loc.value, r.ip.value, basesize.value,
+                                                                    cond=fcond)
         else:
             assert 0, itemsize.value
 
         return fcond
 
+
 class ForceOpAssembler(object):
 
     _mixin_ = True
@@ -977,7 +995,8 @@
 
     # from: ../x86/assembler.py:1668
     # XXX Split into some helper methods
-    def emit_guard_call_assembler(self, op, guard_op, arglocs, regalloc, fcond):
+    def emit_guard_call_assembler(self, op, guard_op, arglocs, regalloc,
+                                                                    fcond):
         faildescr = guard_op.getdescr()
         fail_index = self.cpu.get_fail_descr_number(faildescr)
         self._write_fail_index(fail_index)
@@ -987,8 +1006,8 @@
         # XXX check this
         assert op.numargs() == len(descr._arm_arglocs[0])
         resbox = TempInt()
-        self._emit_call(fail_index, descr._arm_direct_bootstrap_code, op.getarglist(),
-                                regalloc, fcond, result=resbox)
+        self._emit_call(fail_index, descr._arm_direct_bootstrap_code,
+                        op.getarglist(), regalloc, fcond, result=resbox)
         if op.result is None:
             value = self.cpu.done_with_this_frame_void_v
         else:
@@ -1018,7 +1037,7 @@
         jd = descr.outermost_jitdriver_sd
         assert jd is not None
         asm_helper_adr = self.cpu.cast_adr_to_int(jd.assembler_helper_adr)
-        with saved_registers(self.mc, r.caller_resp[1:]+[r.ip], 
+        with saved_registers(self.mc, r.caller_resp[1:] + [r.ip],
                                     r.caller_vfp_resp):
            # resbox is already in r0
             self.mov_loc_loc(arglocs[1], r.r1)
@@ -1086,7 +1105,6 @@
                                                     c.GE, save_exc=True)
         return fcond
 
-
     # ../x86/assembler.py:668
     def redirect_call_assembler(self, oldlooptoken, newlooptoken):
         # we overwrite the instructions at the old _x86_direct_bootstrap_code
@@ -1098,7 +1116,8 @@
         mc.B(target)
         mc.copy_to_raw_memory(oldadr)
 
-    def emit_guard_call_may_force(self, op, guard_op, arglocs, regalloc, fcond):
+    def emit_guard_call_may_force(self, op, guard_op, arglocs, regalloc,
+                                                                    fcond):
         self.mc.LDR_ri(r.ip.value, r.fp.value)
         self.mc.CMP_ri(r.ip.value, 0)
 
@@ -1117,7 +1136,8 @@
                 regs_to_save.append(reg)
         assert gcrootmap.is_shadow_stack
         with saved_registers(self.mc, regs_to_save):
-            self._emit_call(NO_FORCE_INDEX, self.releasegil_addr, [], self._regalloc, fcond)
+            self._emit_call(NO_FORCE_INDEX, self.releasegil_addr, [],
+                                                    self._regalloc, fcond)
 
     def call_reacquire_gil(self, gcrootmap, save_loc, fcond):
         # save the previous result into the stack temporarily.
@@ -1131,10 +1151,11 @@
             vfp_regs_to_save.append(save_loc)
         # call the reopenstack() function (also reacquiring the GIL)
         if len(regs_to_save) % 2 != 1:
-            regs_to_save.append(r.ip) # for alingment
+            regs_to_save.append(r.ip)  # for alignment
         assert gcrootmap.is_shadow_stack
         with saved_registers(self.mc, regs_to_save, vfp_regs_to_save):
-            self._emit_call(NO_FORCE_INDEX, self.reacqgil_addr, [], self._regalloc, fcond)
+            self._emit_call(NO_FORCE_INDEX, self.reacqgil_addr, [],
+                                                    self._regalloc, fcond)
 
     def write_new_force_index(self):
         # for shadowstack only: get a new, unused force_index number and
@@ -1154,15 +1175,16 @@
         self.mc.gen_load_int(r.ip.value, fail_index)
         self.mc.STR_ri(r.ip.value, r.fp.value)
 
+
 class AllocOpAssembler(object):
 
     _mixin_ = True
 
-
     # from: ../x86/regalloc.py:750
     # called from regalloc
     # XXX kill this function at some point
-    def _regalloc_malloc_varsize(self, size, size_box, vloc, vbox, ofs_items_loc, regalloc, result):
+    def _regalloc_malloc_varsize(self, size, size_box, vloc, vbox,
+                                        ofs_items_loc, regalloc, result):
         self.mc.MUL(size.value, size.value, vloc.value)
         if ofs_items_loc.is_imm():
             self.mc.ADD_ri(size.value, size.value, ofs_items_loc.value)
@@ -1170,8 +1192,8 @@
             self.mc.ADD_rr(size.value, size.value, ofs_items_loc.value)
         force_index = self.write_new_force_index()
         regalloc.force_spill_var(vbox)
-        self._emit_call(force_index, self.malloc_func_addr, [size_box], regalloc,
-                                    result=result)
+        self._emit_call(force_index, self.malloc_func_addr, [size_box],
+                                                regalloc, result=result)
 
     def emit_op_new(self, op, arglocs, regalloc, fcond):
         self.propagate_memoryerror_if_r0_is_null()
@@ -1203,6 +1225,7 @@
     emit_op_newstr = emit_op_new_array
     emit_op_newunicode = emit_op_new_array
 
+
 class FloatOpAssemlber(object):
     _mixin_ = True
 
@@ -1244,6 +1267,7 @@
         self.mc.VCVT_int_to_float(res.value, temp.value)
         return fcond
 
+
 class ResOpAssembler(GuardOpAssembler, IntOpAsslember,
                     OpAssembler, UnaryIntOpAssembler,
                     FieldOpAssembler, ArrayOpAssember,
@@ -1251,4 +1275,3 @@
                     ForceOpAssembler, AllocOpAssembler,
                     FloatOpAssemlber):
     pass
-
diff --git a/pypy/jit/backend/arm/regalloc.py b/pypy/jit/backend/arm/regalloc.py
--- a/pypy/jit/backend/arm/regalloc.py
+++ b/pypy/jit/backend/arm/regalloc.py
@@ -1,5 +1,5 @@
 from pypy.jit.backend.llsupport.regalloc import FrameManager, \
-        RegisterManager, compute_vars_longevity, TempBox, compute_loop_consts
+        RegisterManager, TempBox, compute_loop_consts
 from pypy.jit.backend.arm import registers as r
 from pypy.jit.backend.arm import locations
 from pypy.jit.backend.arm.locations import imm
@@ -15,17 +15,16 @@
 from pypy.jit.backend.arm.arch import MY_COPY_OF_REGS, WORD
 from pypy.jit.codewriter import longlong
 from pypy.jit.metainterp.history import (Const, ConstInt, ConstFloat, ConstPtr,
-                                        Box, BoxInt, BoxPtr, AbstractFailDescr,
+                                        Box, BoxPtr,
                                         INT, REF, FLOAT, LoopToken)
 from pypy.jit.metainterp.resoperation import rop
 from pypy.jit.backend.llsupport.descr import BaseFieldDescr, BaseArrayDescr, \
-                                             BaseCallDescr, BaseSizeDescr, \
-                                             InteriorFieldDescr
+                                             BaseSizeDescr, InteriorFieldDescr
 from pypy.jit.backend.llsupport import symbolic
-from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory
+from pypy.rpython.lltypesystem import lltype, rffi, rstr
 from pypy.jit.codewriter import heaptracker
 from pypy.jit.codewriter.effectinfo import EffectInfo
-from pypy.rlib.objectmodel import we_are_translated
+
 
 class TempInt(TempBox):
     type = INT
@@ -33,30 +32,36 @@
     def __repr__(self):
         return "<TempInt at %s>" % (id(self),)
 
+
 class TempPtr(TempBox):
     type = REF
 
     def __repr__(self):
         return "<TempPtr at %s>" % (id(self),)
 
+
 class TempFloat(TempBox):
     type = FLOAT
 
     def __repr__(self):
         return "<TempFloat at %s>" % (id(self),)
 
+
 class ARMFrameManager(FrameManager):
+
     def __init__(self):
         FrameManager.__init__(self)
         self.frame_depth = 1
+
     @staticmethod
     def frame_pos(loc, type):
         num_words = ARMFrameManager.frame_size(type)
         if type == FLOAT:
             # Make sure that loc is an even value
             # the frame layout requires loc to be even!!
-            assert (loc & 1) == 0 
-            return locations.StackLocation(loc+1, num_words=num_words, type=type)
+            assert (loc & 1) == 0
+            return locations.StackLocation(loc + 1,
+                            num_words=num_words, type=type)
         return locations.StackLocation(loc, num_words=num_words, type=type)
 
     @staticmethod
@@ -65,9 +70,11 @@
             return  2
         return 1
 
+
 def void(self, op, fcond):
     return []
 
+
 class VFPRegisterManager(RegisterManager):
     all_regs = r.all_vfp_regs
     box_types = [FLOAT]
@@ -91,7 +98,6 @@
         return r
 
     def ensure_value_is_boxed(self, thing, forbidden_vars=[]):
-        box = None
         loc = None
         if isinstance(thing, Const):
             assert isinstance(thing, ConstFloat)
@@ -103,18 +109,20 @@
                             forbidden_vars=self.temp_boxes + forbidden_vars)
         return loc
 
-    def get_scratch_reg(self, type=FLOAT, forbidden_vars=[], selected_reg=None):
-        assert type == FLOAT # for now
+    def get_scratch_reg(self, type=FLOAT, forbidden_vars=[],
+                                                        selected_reg=None):
+        assert type == FLOAT  # for now
         box = TempFloat()
         self.temp_boxes.append(box)
-        reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars, selected_reg=selected_reg)
+        reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars,
+                                                    selected_reg=selected_reg)
         return reg
 
 
 class ARMv7RegisterMananger(RegisterManager):
-    all_regs              = r.all_regs
-    box_types             = None       # or a list of acceptable types
-    no_lower_byte_regs    = all_regs
+    all_regs = r.all_regs
+    box_types = None       # or a list of acceptable types
+    no_lower_byte_regs = all_regs
     save_around_call_regs = r.caller_resp
 
     REGLOC_TO_COPY_AREA_OFS = {
@@ -144,14 +152,14 @@
         assert 0
 
     def ensure_value_is_boxed(self, thing, forbidden_vars=None):
-        box = None
         loc = None
         if isinstance(thing, Const):
             if isinstance(thing, ConstPtr):
                 tp = REF
             else:
                 tp = INT
-            loc = self.get_scratch_reg(tp, forbidden_vars=self.temp_boxes + forbidden_vars)
+            loc = self.get_scratch_reg(tp, forbidden_vars=self.temp_boxes
+                                                            + forbidden_vars)
             imm = self.convert_to_imm(thing)
             self.assembler.load(loc, imm)
         else:
@@ -163,9 +171,11 @@
         assert type == INT or type == REF
         box = TempBox()
         self.temp_boxes.append(box)
-        reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars, selected_reg=selected_reg)
+        reg = self.force_allocate_reg(box, forbidden_vars=forbidden_vars,
+                                                    selected_reg=selected_reg)
         return reg
 
+
 class Regalloc(object):
 
     def __init__(self, longevity, frame_manager=None, assembler=None):
@@ -181,7 +191,7 @@
             return self.vfprm.loc(var)
         else:
             return self.rm.loc(var)
-    
+
     def position(self):
         return self.rm.position
 
@@ -219,9 +229,11 @@
         else:
             return self.rm.force_allocate_reg(var, forbidden_vars,
                                               selected_reg, need_lower_byte)
+
     def try_allocate_reg(self, v, selected_reg=None, need_lower_byte=False):
         if v.type == FLOAT:
-            return self.vfprm.try_allocate_reg(v, selected_reg, need_lower_byte)
+            return self.vfprm.try_allocate_reg(v, selected_reg,
+                                                            need_lower_byte)
         else:
             return self.rm.try_allocate_reg(v, selected_reg, need_lower_byte)
 
@@ -234,17 +246,18 @@
     def possibly_free_vars_for_op(self, op):
         for i in range(op.numargs()):
             var = op.getarg(i)
-            if var is not None: # xxx kludgy
+            if var is not None:  # xxx kludgy
                 self.possibly_free_var(var)
 
     def possibly_free_vars(self, vars):
         for var in vars:
-            if var is not None: # xxx kludgy
+            if var is not None:  # xxx kludgy
                 self.possibly_free_var(var)
 
     def get_scratch_reg(self, type, forbidden_vars=[], selected_reg=None):
         if type == FLOAT:
-            return self.vfprm.get_scratch_reg(type, forbidden_vars, selected_reg)
+            return self.vfprm.get_scratch_reg(type, forbidden_vars,
+                                                                selected_reg)
         else:
             return self.rm.get_scratch_reg(type, forbidden_vars, selected_reg)
 
@@ -275,10 +288,9 @@
         for i in range(len(inputargs)):
             arg = inputargs[i]
             assert not isinstance(arg, Const)
-            reg = None
             loc = inputargs[i]
             if arg not in loop_consts and self.longevity[arg][1] > -1:
-                reg = self.try_allocate_reg(loc)
+                self.try_allocate_reg(loc)
 
             loc = self.loc(arg)
             if arg.type == FLOAT:
@@ -286,7 +298,6 @@
             else:
                 nonfloatlocs[i] = loc
         self.possibly_free_vars(list(inputargs))
-        
         return nonfloatlocs, floatlocs
 
     def update_bindings(self, locs, frame_depth, inputargs):
@@ -318,7 +329,6 @@
         # is also used on op args, which is a non-resizable list
         self.possibly_free_vars(list(inputargs))
 
-
     def force_spill_var(self, var):
         if var.type == FLOAT:
             self.vfprm.force_spill_var(var)
@@ -328,6 +338,7 @@
     def before_call(self, force_store=[], save_all_regs=False):
         self.rm.before_call(force_store, save_all_regs)
         self.vfprm.before_call(force_store, save_all_regs)
+
     def _ensure_value_is_boxed(self, thing, forbidden_vars=[]):
         if thing.type == FLOAT:
             return self.vfprm.ensure_value_is_boxed(thing, forbidden_vars)
@@ -405,7 +416,6 @@
         res = self.force_allocate_reg(op.result)
         return self._prepare_guard(guard, [reg1, reg2, res])
 
-
     def prepare_guard_int_add_ovf(self, op, guard, fcond):
         locs = self._prepare_op_int_add(op, fcond)
         res = self.force_allocate_reg(op.result)
@@ -425,9 +435,12 @@
     prepare_op_int_and = prepare_op_ri('int_and')
     prepare_op_int_or = prepare_op_ri('int_or')
     prepare_op_int_xor = prepare_op_ri('int_xor')
-    prepare_op_int_lshift = prepare_op_ri('int_lshift', imm_size=0x1F, allow_zero=False, commutative=False)
-    prepare_op_int_rshift = prepare_op_ri('int_rshift', imm_size=0x1F, allow_zero=False, commutative=False)
-    prepare_op_uint_rshift = prepare_op_ri('uint_rshift', imm_size=0x1F, allow_zero=False, commutative=False)
+    prepare_op_int_lshift = prepare_op_ri('int_lshift', imm_size=0x1F,
+                                        allow_zero=False, commutative=False)
+    prepare_op_int_rshift = prepare_op_ri('int_rshift', imm_size=0x1F,
+                                        allow_zero=False, commutative=False)
+    prepare_op_uint_rshift = prepare_op_ri('uint_rshift', imm_size=0x1F,
+                                        allow_zero=False, commutative=False)
 
     prepare_op_int_lt = prepare_cmp_op('int_lt')
     prepare_op_int_le = prepare_cmp_op('int_le')
@@ -464,7 +477,6 @@
     prepare_op_int_add_ovf = prepare_op_int_add
     prepare_op_int_sub_ovf = prepare_op_int_sub
 
-
     prepare_op_int_is_true = prepare_op_unary_cmp('int_is_true')
     prepare_op_int_is_zero = prepare_op_unary_cmp('int_is_zero')
 
@@ -545,7 +557,6 @@
     prepare_op_guard_overflow = prepare_op_guard_no_overflow
     prepare_op_guard_not_invalidated = prepare_op_guard_no_overflow
 
-
     def prepare_op_guard_exception(self, op, fcond):
         boxes = list(op.getarglist())
         arg0 = ConstInt(rffi.cast(lltype.Signed, op.getarg(0).getint()))
@@ -558,7 +569,8 @@
             resloc = None
         pos_exc_value = imm(self.cpu.pos_exc_value())
         pos_exception = imm(self.cpu.pos_exception())
-        arglocs = self._prepare_guard(op, [loc, loc1, resloc, pos_exc_value, pos_exception])
+        arglocs = self._prepare_guard(op,
+                    [loc, loc1, resloc, pos_exc_value, pos_exception])
         return arglocs
 
     def prepare_op_guard_no_exception(self, op, fcond):
@@ -588,9 +600,7 @@
 
         return arglocs
 
-
     def prepare_op_jump(self, op, fcond):
-        assembler = self.assembler
         descr = op.getdescr()
         assert isinstance(descr, LoopToken)
         nonfloatlocs, floatlocs = descr._arm_arglocs
@@ -667,10 +677,9 @@
         self.possibly_free_vars_for_op(op)
         self.free_temp_vars()
         result_loc = self.force_allocate_reg(op.result)
-        return [base_loc, index_loc, result_loc, ofs_loc, imm(ofs), 
-                                        imm(itemsize), imm(fieldsize)]
+        return [base_loc, index_loc, result_loc, ofs_loc, imm(ofs),
+                                    imm(itemsize), imm(fieldsize)]
 
-    
     def prepare_op_setinteriorfield_gc(self, op, fcond):
         t = self._unpack_interiorfielddescr(op.getdescr())
         ofs, itemsize, fieldsize, sign = t
@@ -699,11 +708,11 @@
         return [res, base_loc, imm(ofs)]
 
     def prepare_op_setarrayitem_gc(self, op, fcond):
-        a0, a1, a2 = boxes = list(op.getarglist())
+        a0, a1, a2 = list(op.getarglist())
         _, scale, base_ofs, _, ptr = self._unpack_arraydescr(op.getdescr())
 
         args = op.getarglist()
-        base_loc  = self._ensure_value_is_boxed(a0, args)
+        base_loc = self._ensure_value_is_boxed(a0, args)
         ofs_loc = self._ensure_value_is_boxed(a1, args)
         value_loc = self._ensure_value_is_boxed(a2, args)
         assert check_imm_arg(base_ofs)
@@ -714,7 +723,7 @@
         a0, a1 = boxes = list(op.getarglist())
         _, scale, base_ofs, _, ptr = self._unpack_arraydescr(op.getdescr())
 
-        base_loc  = self._ensure_value_is_boxed(a0, boxes)
+        base_loc = self._ensure_value_is_boxed(a0, boxes)
         ofs_loc = self._ensure_value_is_boxed(a1, boxes)
         self.possibly_free_vars_for_op(op)
         self.free_temp_vars()
@@ -804,8 +813,9 @@
 
         basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE,
                                          self.cpu.translate_support_code)
-        scale = itemsize/2
-        return [res, base_loc, ofs_loc, imm(scale), imm(basesize), imm(itemsize)]
+        scale = itemsize / 2
+        return [res, base_loc, ofs_loc,
+            imm(scale), imm(basesize), imm(itemsize)]
 
     def prepare_op_unicodesetitem(self, op, fcond):
         boxes = list(op.getarglist())
@@ -814,8 +824,9 @@
         value_loc = self._ensure_value_is_boxed(boxes[2], boxes)
         basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE,
                                          self.cpu.translate_support_code)
-        scale = itemsize/2
-        return [value_loc, base_loc, ofs_loc, imm(scale), imm(basesize), imm(itemsize)]
+        scale = itemsize / 2
+        return [value_loc, base_loc, ofs_loc,
+            imm(scale), imm(basesize), imm(itemsize)]
 
     def prepare_op_same_as(self, op, fcond):
         arg = op.getarg(0)
@@ -839,8 +850,9 @@
         else:
             arglocs = self._prepare_args_for_new_op(op.getdescr())
             force_index = self.assembler.write_new_force_index()
-            self.assembler._emit_call(force_index, self.assembler.malloc_func_addr,
-                                    arglocs, self, fcond, result=op.result)
+            self.assembler._emit_call(force_index,
+                        self.assembler.malloc_func_addr, arglocs,
+                        self, fcond, result=op.result)
             self.possibly_free_vars(arglocs)
         self.possibly_free_var(op.result)
         return []
@@ -853,8 +865,9 @@
         else:
             callargs = self._prepare_args_for_new_op(descrsize)
             force_index = self.assembler.write_new_force_index()
-            self.assembler._emit_call(force_index, self.assembler.malloc_func_addr,
-                                        callargs, self, fcond, result=op.result)
+            self.assembler._emit_call(force_index,
+                        self.assembler.malloc_func_addr, callargs,
+                        self, fcond, result=op.result)
             self.possibly_free_vars(callargs)
         self.possibly_free_var(op.result)
         return [imm(classint)]
@@ -875,8 +888,9 @@
             argboxes = [ConstInt(x) for x in args]
             argboxes.append(box_num_elem)
             force_index = self.assembler.write_new_force_index()
-            self.assembler._emit_call(force_index, self.assembler.malloc_array_func_addr,
-                                        argboxes, self, fcond, result=op.result)
+            self.assembler._emit_call(force_index,
+                    self.assembler.malloc_array_func_addr, argboxes, self,
+                    fcond, result=op.result)
             return []
         # boehm GC
         itemsize, scale, basesize, ofs_length, _ = (
@@ -916,7 +930,7 @@
         for v, val in self.frame_manager.frame_bindings.items():
             if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)):
                 assert val.is_stack()
-                gcrootmap.add_frame_offset(shape, val.position*-WORD)
+                gcrootmap.add_frame_offset(shape, val.position * -WORD)
         for v, reg in self.rm.reg_bindings.items():
             if reg is r.r0:
                 continue
@@ -929,6 +943,7 @@
                     assert 0, 'sure??'
         return gcrootmap.compress_callshape(shape,
                                             self.assembler.datablockwrapper)
+
     def prepare_op_newstr(self, op, fcond):
         gc_ll_descr = self.cpu.gc_ll_descr
         if gc_ll_descr.get_funcptr_for_newstr is not None:
@@ -947,8 +962,9 @@
         gc_ll_descr = self.cpu.gc_ll_descr
         if gc_ll_descr.get_funcptr_for_newunicode is not None:
             force_index = self.assembler.write_new_force_index()
-            self.assembler._emit_call(force_index, self.assembler.malloc_unicode_func_addr,
-                                    [op.getarg(0)], self, fcond, op.result)
+            self.assembler._emit_call(force_index,
+                                self.assembler.malloc_unicode_func_addr,
+                                [op.getarg(0)], self, fcond, op.result)
             return []
         # boehm GC
         ofs_items, _, ofs = symbolic.get_array_token(rstr.UNICODE,
@@ -1047,7 +1063,8 @@
         assert isinstance(descr, LoopToken)
         jd = descr.outermost_jitdriver_sd
         assert jd is not None
-        size = jd.portal_calldescr.get_result_size(self.cpu.translate_support_code)
+        size = jd.portal_calldescr.get_result_size(
+                                self.cpu.translate_support_code)
         vable_index = jd.index_of_virtualizable
         if vable_index >= 0:
             self._sync_var(op.getarg(vable_index))
@@ -1097,7 +1114,8 @@
         arraydescr = descr.arraydescr
         ofs = arraydescr.get_base_size(self.cpu.translate_support_code)
         itemsize = arraydescr.get_item_size(self.cpu.translate_support_code)
-        fieldsize = descr.fielddescr.get_field_size(self.cpu.translate_support_code)
+        fieldsize = descr.fielddescr.get_field_size(
+                                            self.cpu.translate_support_code)
         sign = descr.fielddescr.is_field_signed()
         ofs += descr.fielddescr.offset
         return ofs, itemsize, fieldsize, sign
@@ -1105,22 +1123,37 @@
     prepare_op_float_add = prepare_float_op(name='prepare_op_float_add')
     prepare_op_float_sub = prepare_float_op(name='prepare_op_float_sub')
     prepare_op_float_mul = prepare_float_op(name='prepare_op_float_mul')
-    prepare_op_float_truediv = prepare_float_op(name='prepare_op_float_truediv')
-    prepare_op_float_lt = prepare_float_op(float_result=False, name='prepare_op_float_lt')
-    prepare_op_float_le = prepare_float_op(float_result=False, name='prepare_op_float_le')
-    prepare_op_float_eq = prepare_float_op(float_result=False, name='prepare_op_float_eq')
-    prepare_op_float_ne = prepare_float_op(float_result=False, name='prepare_op_float_ne')
-    prepare_op_float_gt = prepare_float_op(float_result=False, name='prepare_op_float_gt')
-    prepare_op_float_ge = prepare_float_op(float_result=False, name='prepare_op_float_ge')
-    prepare_op_float_neg = prepare_float_op(base=False, name='prepare_op_float_neg')
-    prepare_op_float_abs = prepare_float_op(base=False, name='prepare_op_float_abs')
+    prepare_op_float_truediv = prepare_float_op(
+                                            name='prepare_op_float_truediv')
+    prepare_op_float_lt = prepare_float_op(float_result=False,
+                                            name='prepare_op_float_lt')
+    prepare_op_float_le = prepare_float_op(float_result=False,
+                                            name='prepare_op_float_le')
+    prepare_op_float_eq = prepare_float_op(float_result=False,
+                                            name='prepare_op_float_eq')
+    prepare_op_float_ne = prepare_float_op(float_result=False,
+                                            name='prepare_op_float_ne')
+    prepare_op_float_gt = prepare_float_op(float_result=False,
+                                            name='prepare_op_float_gt')
+    prepare_op_float_ge = prepare_float_op(float_result=False,
+                                            name='prepare_op_float_ge')
+    prepare_op_float_neg = prepare_float_op(base=False,
+                                            name='prepare_op_float_neg')
+    prepare_op_float_abs = prepare_float_op(base=False,
+                                            name='prepare_op_float_abs')
 
-    prepare_guard_float_lt = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_lt')
-    prepare_guard_float_le = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_le')
-    prepare_guard_float_eq = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_eq')
-    prepare_guard_float_ne = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_ne')
-    prepare_guard_float_gt = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_gt')
-    prepare_guard_float_ge = prepare_float_op(guard=True, float_result=False, name='prepare_guard_float_ge')
+    prepare_guard_float_lt = prepare_float_op(guard=True,
+                            float_result=False, name='prepare_guard_float_lt')
+    prepare_guard_float_le = prepare_float_op(guard=True,
+                            float_result=False, name='prepare_guard_float_le')
+    prepare_guard_float_eq = prepare_float_op(guard=True,
+                            float_result=False, name='prepare_guard_float_eq')
+    prepare_guard_float_ne = prepare_float_op(guard=True,
+                            float_result=False, name='prepare_guard_float_ne')
+    prepare_guard_float_gt = prepare_float_op(guard=True,
+                            float_result=False, name='prepare_guard_float_gt')
+    prepare_guard_float_ge = prepare_float_op(guard=True,
+                            float_result=False, name='prepare_guard_float_ge')
 
     def prepare_op_math_sqrt(self, op, fcond):
         loc = self._ensure_value_is_boxed(op.getarg(1))
@@ -1135,7 +1168,7 @@
         temp_loc = self.get_scratch_reg(FLOAT)
         self.possibly_free_vars_for_op(op)
         self.free_temp_vars()
-        res  = self.rm.force_allocate_reg(op.result)
+        res = self.rm.force_allocate_reg(op.result)
         return [loc1, temp_loc, res]
 
     def prepare_op_cast_int_to_float(self, op, fcond):
@@ -1143,20 +1176,24 @@
         temp_loc = self.get_scratch_reg(FLOAT)
         self.possibly_free_vars_for_op(op)
         self.free_temp_vars()
-        res  = self.vfprm.force_allocate_reg(op.result)
+        res = self.vfprm.force_allocate_reg(op.result)
         return [loc1, temp_loc, res]
 
     def prepare_force_spill(self, op, fcond):
         self.force_spill_var(op.getarg(0))
         return []
 
+
 def add_none_argument(fn):
     return lambda self, op, fcond: fn(self, op, None, fcond)
 
+
 def notimplemented(self, op, fcond):
-    raise NotImplementedError, op
+    raise NotImplementedError(op)
+
+
 def notimplemented_with_guard(self, op, guard_op, fcond):
-    raise NotImplementedError, op
+    raise NotImplementedError(op)
 
 operations = [notimplemented] * (rop._LAST + 1)
 operations_with_guard = [notimplemented_with_guard] * (rop._LAST + 1)
diff --git a/pypy/jit/backend/arm/registers.py b/pypy/jit/backend/arm/registers.py
--- a/pypy/jit/backend/arm/registers.py
+++ b/pypy/jit/backend/arm/registers.py
@@ -1,11 +1,14 @@
-from pypy.jit.backend.arm.locations import RegisterLocation, VFPRegisterLocation
+from pypy.jit.backend.arm.locations import VFPRegisterLocation
+from pypy.jit.backend.arm.locations import RegisterLocation
 
 registers = [RegisterLocation(i) for i in range(16)]
 vfpregisters = [VFPRegisterLocation(i) for i in range(16)]
-r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15 = registers
+[r0, r1, r2, r3, r4, r5, r6, r7,
+    r8, r9, r10, r11, r12, r13, r14, r15] = registers
 
 #vfp registers interpreted as 64-bit registers
-d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15 = vfpregisters
+[d0, d1, d2, d3, d4, d5, d6, d7,
+    d8, d9, d10, d11, d12, d13, d14, d15] = vfpregisters
 
 # aliases for registers
 fp = r11
@@ -20,11 +23,10 @@
 
 caller_resp = [r0, r1, r2, r3]
 callee_resp = [r4, r5, r6, r7, r8, r9, r10, fp]
-callee_saved_registers = callee_resp+[lr]
-callee_restored_registers = callee_resp+[pc]
+callee_saved_registers = callee_resp + [lr]
+callee_restored_registers = callee_resp + [pc]
 
 caller_vfp_resp = [d0, d1, d2, d3, d4, d5, d6, d7]
 callee_vfp_resp = [d8, d9, d10, d11, d12, d13, d14, d15]
 
 callee_saved_vfp_registers = callee_vfp_resp
-
diff --git a/pypy/jit/backend/arm/runner.py b/pypy/jit/backend/arm/runner.py
--- a/pypy/jit/backend/arm/runner.py
+++ b/pypy/jit/backend/arm/runner.py
@@ -21,12 +21,13 @@
             assert gcdescr.config.translation.gcremovetypeptr is False
         AbstractLLCPU.__init__(self, rtyper, stats, opts,
                                translate_support_code, gcdescr)
+
     def setup(self):
         if self.opts is not None:
             failargs_limit = self.opts.failargs_limit
         else:
             failargs_limit = 1000
-        self.assembler = AssemblerARM(self)
+        self.assembler = AssemblerARM(self, failargs_limit=failargs_limit)
 
     def setup_once(self):
         self.assembler.setup_once()
@@ -34,7 +35,8 @@
     def finish_once(self):
         pass
 
-    def compile_loop(self, inputargs, operations, looptoken, log=True, name=''):
+    def compile_loop(self, inputargs, operations, looptoken,
+                                                    log=True, name=''):
         self.assembler.assemble_loop(inputargs, operations,
                                                     looptoken, log=log)
 
@@ -113,11 +115,11 @@
         faildescr = self.get_fail_descr_from_number(fail_index)
         rffi.cast(TP, addr_of_force_index)[0] = ~fail_index
         # start of "no gc operation!" block
-        frame_depth = faildescr._arm_frame_depth*WORD
+        frame_depth = faildescr._arm_frame_depth * WORD
         addr_end_of_frame = (addr_of_force_index -
                             (frame_depth +
-                            len(all_regs)*WORD + 
-                            len(all_vfp_regs)*2*WORD))
+                            len(all_regs) * WORD +
+                            len(all_vfp_regs) * 2 * WORD))
         fail_index_2 = self.assembler.failure_recovery_func(
             faildescr._failure_recovery_code,
             addr_of_force_index,
diff --git a/pypy/jit/backend/arm/shift.py b/pypy/jit/backend/arm/shift.py
--- a/pypy/jit/backend/arm/shift.py
+++ b/pypy/jit/backend/arm/shift.py
@@ -3,4 +3,4 @@
 LSR = 0x1
 ASR = 0x2
 ROR = 0x3
-RRX = 0x3 # with imm = 0
+RRX = 0x3  # with imm = 0


More information about the pypy-commit mailing list