      From pypy.commits at gmail.com Wed Dec 16 09:28:17 2015 From: pypy.commits at gmail.com (pypy project) Date: Wed, 16 Dec 2015 15:28:17 +0100 Subject: [pypy-commit] yet another test Message-ID: hello world From pypy.commits at gmail.com Wed Dec 16 09:17:07 2015 From: pypy.commits at gmail.com (pypy project) Date: Wed, 16 Dec 2015 15:17:07 +0100 Subject: [pypy-commit] test Message-ID: this is a test From pypy.commits at gmail.com Wed Dec 16 09:31:29 2015 From: pypy.commits at gmail.com (antocuni) Date: Wed, 16 Dec 2015 06:31:29 -0800 (PST) Subject: [pypy-commit] this is a test Message-ID: <567175c1.a9c8c20a.87520.43a7@mx.google.com> This is a test to see if the bitbucket hook can send emails to the pypy-commit mailing list From pypy.commits at gmail.com Wed Dec 16 09:34:19 2015 From: pypy.commits at gmail.com (antocuni) Date: Wed, 16 Dec 2015 06:34:19 -0800 (PST) Subject: [pypy-commit] this is a test Message-ID: <5671766b.6408c20a.7346.46a4@mx.google.com> This is a test to see if the bitbucket hook can send emails to the pypy-commit mailing list From pypy.commits at gmail.com Wed Dec 16 09:35:44 2015 From: pypy.commits at gmail.com (antocuni) Date: Wed, 16 Dec 2015 06:35:44 -0800 (PST) Subject: [pypy-commit] this is another test Message-ID: <567176c0.e2b0c20a.b8325.3fad@mx.google.com> This is a test to see if the bitbucket hook can send emails to the pypy-commit mailing list From pypy.commits at gmail.com Wed Dec 16 09:48:52 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 16 Dec 2015 06:48:52 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added guard no exception, second cond_call now passing Message-ID: <567179d4.458c700a.db388.5767@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81346:2d4a2bf50e28 Date: 2015-12-16 15:05 +0100 http://bitbucket.org/pypy/pypy/changeset/2d4a2bf50e28/ Log: added guard no exception, second cond_call now passing diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -283,7 +283,6 @@ fcond = self.guard_success_cc self.guard_success_cc = c.cond_none assert fcond != c.cond_none - orig_cond = fcond fcond = c.negate(fcond) jmp_adr = self.mc.get_relative_pos() @@ -326,7 +325,7 @@ pmc.overwrite() # might be overridden again to skip over the following # guard_no_exception too - self.previous_cond_call_jcond = jmp_adr, orig_cond + self.previous_cond_call_jcond = jmp_adr, fcond class AllocOpAssembler(object): _mixin_ = True @@ -943,6 +942,21 @@ def emit_leave_portal_frame(self, op, arglocs, regalloc): self.leave_portal_frame(op) + def emit_guard_no_exception(self, op, arglocs, regalloc): + self.mc.load_imm(r.SCRATCH, self.cpu.pos_exception()) + self.mc.LG(r.SCRATCH2, l.addr(0,r.SCRATCH)) + self.mc.cmp_op(r.SCRATCH2, l.imm(0), imm=True) + self.guard_success_cc = c.EQ + self._emit_guard(op, arglocs) + # If the previous operation was a COND_CALL, overwrite its conditional + # jump to jump over this GUARD_NO_EXCEPTION as well, if we can + if self._find_nearby_operation(regalloc,-1).getopnum() == rop.COND_CALL: + jmp_adr, fcond = self.previous_cond_call_jcond + relative_target = self.mc.currpos() - jmp_adr + pmc = OverwritingBuilder(self.mc, jmp_adr, 1) + pmc.BRCL(fcond, l.imm(relative_target)) + pmc.overwrite() + class 
      
OpAssembler(IntOpAssembler, FloatOpAssembler, GuardOpAssembler, CallOpAssembler, AllocOpAssembler, MemoryOpAssembler, From pypy.commits at gmail.com Wed Dec 16 09:41:11 2015 From: pypy.commits at gmail.com (antocuni) Date: Wed, 16 Dec 2015 06:41:11 -0800 (PST) Subject: [pypy-commit] buildbot default: ignore the file with the password Message-ID: <56717807.458c700a.db388.56be@mx.google.com> Author: Antonio Cuni Branch: Changeset: r973:cb78fa21e8ed Date: 2015-12-16 15:41 +0100 http://bitbucket.org/pypy/buildbot/changeset/cb78fa21e8ed/ Log: ignore the file with the password diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -7,6 +7,7 @@ # master/slaveinfo.py contains the passwords, so it should never be tracked master/slaveinfo.py +bbhook/smtp.password # ignore pidfiles and all the logs twistd.pid From pypy.commits at gmail.com Wed Dec 16 09:49:27 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 16 Dec 2015 06:49:27 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: copied stubs to assemble write barrier and exception path, pushed forward the assembly of cond_call_gc_wb (+array) Message-ID: <567179f7.c2a9190a.f8bd.5ba9@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81347:a0e464a31d7f Date: 2015-12-16 15:48 +0100 http://bitbucket.org/pypy/pypy/changeset/a0e464a31d7f/ Log: copied stubs to assemble write barrier and exception path, pushed forward the assembly of cond_call_gc_wb (+array) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -142,7 +142,129 @@ return startpos def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): - pass # TODO + descr = self.cpu.gc_ll_descr.write_barrier_descr + if descr is None: + return + if not withcards: + func = descr.get_write_barrier_fn(self.cpu) + else: + if descr.jit_wb_cards_set == 0: + return + func = descr.get_write_barrier_from_array_fn(self.cpu) + if func == 0: + return + # + # This builds a helper function called from the slow path of + # write barriers. It must save all registers, and optionally + # all fp registers. It takes its single argument in r0 + # (or in SPP if 'for_frame'). + if for_frame: + argument_loc = r.SPP + else: + argument_loc = r.r0 + + mc = PPCBuilder() + old_mc = self.mc + self.mc = mc + + extra_stack_size = LOCAL_VARS_OFFSET + 4 * WORD + 8 + extra_stack_size = (extra_stack_size + 15) & ~15 + if for_frame: + # NOTE: don't save registers on the jitframe here! It might + # override already-saved values that will be restored + # later... + # + # This 'for_frame' version is called after a CALL. It does not + # need to save many registers: the registers that are anyway + # destroyed by the call can be ignored (VOLATILES), and the + # non-volatile registers won't be changed here. It only needs + # to save r.RCS1 (used below), r3 and f1 (possible results of + # the call), and two more non-volatile registers (used to store + # the RPython exception that occurred in the CALL, if any). + # + # We need to increase our stack frame size a bit to store them. 
+ # + self.mc.load(r.SCRATCH.value, r.SP.value, 0) # SP back chain + self.mc.store_update(r.SCRATCH.value, r.SP.value, -extra_stack_size) + self.mc.std(r.RCS1.value, r.SP.value, LOCAL_VARS_OFFSET + 0 * WORD) + self.mc.std(r.RCS2.value, r.SP.value, LOCAL_VARS_OFFSET + 1 * WORD) + self.mc.std(r.RCS3.value, r.SP.value, LOCAL_VARS_OFFSET + 2 * WORD) + self.mc.std(r.r3.value, r.SP.value, LOCAL_VARS_OFFSET + 3 * WORD) + self.mc.stfd(r.f1.value, r.SP.value, LOCAL_VARS_OFFSET + 4 * WORD) + saved_regs = None + saved_fp_regs = None + + else: + # push all volatile registers, push RCS1, and sometimes push RCS2 + if withcards: + saved_regs = r.VOLATILES + [r.RCS1, r.RCS2] + else: + saved_regs = r.VOLATILES + [r.RCS1] + if withfloats: + saved_fp_regs = r.MANAGED_FP_REGS + else: + saved_fp_regs = [] + + self._push_core_regs_to_jitframe(mc, saved_regs) + self._push_fp_regs_to_jitframe(mc, saved_fp_regs) + + if for_frame: + # note that it's safe to store the exception in register, + # since the call to write barrier can't collect + # (and this is assumed a bit left and right here, like lack + # of _reload_frame_if_necessary) + # This trashes r0 and r2, which is fine in this case + assert argument_loc is not r.r0 + self._store_and_reset_exception(mc, r.RCS2, r.RCS3) + + if withcards: + mc.mr(r.RCS2.value, argument_loc.value) + # + # Save the lr into r.RCS1 + mc.mflr(r.RCS1.value) + # + func = rffi.cast(lltype.Signed, func) + # Note: if not 'for_frame', argument_loc is r0, which must carefully + # not be overwritten above + mc.mr(r.r3.value, argument_loc.value) + mc.load_imm(mc.RAW_CALL_REG, func) + mc.raw_call() + # + # Restore lr + mc.mtlr(r.RCS1.value) + + if for_frame: + self._restore_exception(mc, r.RCS2, r.RCS3) + + if withcards: + # A final andix before the blr, for the caller. Careful to + # not follow this instruction with another one that changes + # the status of cr0! 
+ card_marking_mask = descr.jit_wb_cards_set_singlebyte + mc.lbz(r.RCS2.value, r.RCS2.value, descr.jit_wb_if_flag_byteofs) + mc.andix(r.RCS2.value, r.RCS2.value, card_marking_mask & 0xFF) + + if for_frame: + self.mc.ld(r.RCS1.value, r.SP.value, LOCAL_VARS_OFFSET + 0 * WORD) + self.mc.ld(r.RCS2.value, r.SP.value, LOCAL_VARS_OFFSET + 1 * WORD) + self.mc.ld(r.RCS3.value, r.SP.value, LOCAL_VARS_OFFSET + 2 * WORD) + self.mc.ld(r.r3.value, r.SP.value, LOCAL_VARS_OFFSET + 3 * WORD) + self.mc.lfd(r.f1.value, r.SP.value, LOCAL_VARS_OFFSET + 4 * WORD) + self.mc.addi(r.SP.value, r.SP.value, extra_stack_size) + + else: + self._pop_core_regs_from_jitframe(mc, saved_regs) + self._pop_fp_regs_from_jitframe(mc, saved_fp_regs) + + mc.blr() + + self.mc = old_mc + rawstart = mc.materialize(self.cpu, []) + if for_frame: + self.wb_slowpath[4] = rawstart + else: + self.wb_slowpath[withcards + 2 * withfloats] = rawstart + def build_frame_realloc_slowpath(self): # this code should do the following steps @@ -154,10 +276,78 @@ # f) store the address of the new jitframe in the shadowstack # c) set the gcmap field to 0 in the new jitframe # g) restore registers and return - pass # TODO + mc = PPCBuilder() + self.mc = mc + + # signature of this _frame_realloc_slowpath function: + # * on entry, r0 is the new size + # * on entry, r2 is the gcmap + # * no managed register must be modified + + ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') + mc.store(r.r2.value, r.SPP.value, ofs2) + + self._push_core_regs_to_jitframe(mc) + self._push_fp_regs_to_jitframe(mc) + + # Save away the LR inside r30 + mc.mflr(r.RCS1.value) + + # First argument is SPP (= r31), which is the jitframe + mc.mr(r.r3.value, r.SPP.value) + + # Second argument is the new size, which is still in r0 here + mc.mr(r.r4.value, r.r0.value) + + # This trashes r0 and r2 + self._store_and_reset_exception(mc, r.RCS2, r.RCS3) + + # Do the call + adr = rffi.cast(lltype.Signed, self.cpu.realloc_frame) + mc.load_imm(mc.RAW_CALL_REG, adr) + mc.raw_call() + + # The result is stored back into SPP (= r31) + mc.mr(r.SPP.value, r.r3.value) + + self._restore_exception(mc, r.RCS2, r.RCS3) + + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + diff = mc.load_imm_plus(r.r5, gcrootmap.get_root_stack_top_addr()) + mc.load(r.r5.value, r.r5.value, diff) + mc.store(r.r3.value, r.r5.value, -WORD) + + mc.mtlr(r.RCS1.value) # restore LR + self._pop_core_regs_from_jitframe(mc) + self._pop_fp_regs_from_jitframe(mc) + mc.blr() + + self._frame_realloc_slowpath = mc.materialize(self.cpu, []) + self.mc = None def _build_propagate_exception_path(self): - pass # TODO + if not self.cpu.propagate_exception_descr: + return + + self.mc = PPCBuilder() + # + # read and reset the current exception + + propagate_exception_descr = rffi.cast(lltype.Signed, + cast_instance_to_gcref(self.cpu.propagate_exception_descr)) + ofs3 = self.cpu.get_ofs_of_frame_field('jf_guard_exc') + ofs4 = self.cpu.get_ofs_of_frame_field('jf_descr') + + self._store_and_reset_exception(self.mc, r.r3) + self.mc.load_imm(r.r4, propagate_exception_descr) + self.mc.std(r.r3.value, r.SPP.value, ofs3) + self.mc.std(r.r4.value, r.SPP.value, ofs4) + # + self._call_footer() + rawstart = self.mc.materialize(self.cpu, []) + self.propagate_exception_path = rawstart + self.mc = None def _build_cond_call_slowpath(self, supports_floats, callee_only): """ This builds a general call slowpath, for whatever call happens to diff --git a/rpython/jit/backend/zarch/helper/regalloc.py 
b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -3,6 +3,9 @@ from rpython.jit.backend.llsupport.regalloc import TempVar import rpython.jit.backend.zarch.registers as r +def check_imm_value(value, lower_bound=-2**15, upper_bound=2**15-1): + return lower_bound <= value <= upper_bound + def check_imm(arg, lower_bound=-2**15, upper_bound=2**15-1): if isinstance(arg, ConstInt): i = arg.getint() diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -1,8 +1,10 @@ +from rpython.jit.backend.llsupport.jump import remap_frame_layout from rpython.jit.backend.zarch.arch import THREADLOCAL_ADDR_OFFSET from rpython.jit.backend.zarch.helper.assembler import (gen_emit_cmp_op, gen_emit_rr_or_rpool, gen_emit_shift, gen_emit_pool_or_rr_evenodd, gen_emit_imm_pool_rr) -from rpython.jit.backend.zarch.helper.regalloc import (check_imm,) +from rpython.jit.backend.zarch.helper.regalloc import (check_imm, + check_imm_value) from rpython.jit.backend.zarch.codebuilder import ZARCHGuardToken, InstrBuilder import rpython.jit.backend.zarch.conditions as c import rpython.jit.backend.zarch.registers as r @@ -17,7 +19,7 @@ from rpython.jit.metainterp.resoperation import rop from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref -from rpython.jit.backend.llsupport.jump import remap_frame_layout +from rpython.rlib.objectmodel import we_are_translated class IntOpAssembler(object): _mixin_ = True @@ -407,21 +409,24 @@ assert loc_base.is_reg() if is_frame: assert loc_base is r.SPP - assert check_imm(descr.jit_wb_if_flag_byteofs) - mc.lbz(r.SCRATCH2.value, loc_base.value, descr.jit_wb_if_flag_byteofs) - mc.andix(r.SCRATCH.value, r.SCRATCH2.value, mask & 0xFF) + assert check_imm_value(descr.jit_wb_if_flag_byteofs) + mc.LGB(r.SCRATCH2, l.addr(descr.jit_wb_if_flag_byteofs, loc_base)) + mc.LGR(r.SCRATCH, r.SCRATCH2) + mc.NILL(r.SCRATCH, l.imm(mask & 0xFF)) jz_location = mc.get_relative_pos() mc.trap() # patched later with 'beq' + mc.write('\x00' * 4) # for cond_call_gc_wb_array, also add another fast path: # if GCFLAG_CARDS_SET, then we can just set one bit and be done if card_marking_mask: # GCFLAG_CARDS_SET is in the same byte, loaded in r2 already - mc.andix(r.SCRATCH.value, r.SCRATCH2.value, - card_marking_mask & 0xFF) + mc.LGR(r.SCRATCH, r.SCRATCH2) + mc.NILL(r.SCRATCH, l.imm(card_marking_mask & 0xFF)) js_location = mc.get_relative_pos() mc.trap() # patched later with 'bne' + mc.write('\x00' * 4) else: js_location = 0 @@ -491,7 +496,7 @@ byte_index = loc_index.value >> descr.jit_wb_card_page_shift byte_ofs = ~(byte_index >> 3) byte_val = 1 << (byte_index & 7) - assert check_imm(byte_ofs) + assert check_imm_value(byte_ofs) mc.lbz(r.SCRATCH.value, loc_base.value, byte_ofs) mc.ori(r.SCRATCH.value, r.SCRATCH.value, byte_val) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -10,23 +10,24 @@ from rpython.jit.metainterp.history import JitCellToken, TargetToken from rpython.jit.metainterp.resoperation import rop from rpython.jit.backend.zarch import locations as l -from rpython.rtyper.lltypesystem import rffi, lltype, rstr, llmemory -from rpython.rtyper.lltypesystem.lloperation import llop -from 
rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.llsupport import symbolic from rpython.jit.backend.llsupport.descr import ArrayDescr -import rpython.jit.backend.zarch.registers as r -import rpython.jit.backend.zarch.conditions as c -import rpython.jit.backend.zarch.helper.regalloc as helper from rpython.jit.backend.llsupport.descr import unpack_arraydescr from rpython.jit.backend.llsupport.descr import unpack_fielddescr from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap +import rpython.jit.backend.zarch.registers as r +import rpython.jit.backend.zarch.conditions as c +import rpython.jit.backend.zarch.helper.regalloc as helper +from rpython.jit.backend.zarch.helper.regalloc import (check_imm,) +from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_print -from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.rlib import rgc from rpython.rlib.rarithmetic import r_uint +from rpython.rtyper.lltypesystem import rffi, lltype, rstr, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.annlowlevel import cast_instance_to_gcref LIMIT_LOOP_BREAK = 15000 # should be much smaller than 32 KB @@ -98,7 +99,7 @@ forbidden_vars=self.temp_boxes) return loc - def get_scratch_reg(self): + def get_scratch_reg(self,): box = TempFloat() reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes) self.temp_boxes.append(box) @@ -898,6 +899,18 @@ locs.append(loc) return locs + def prepare_cond_call_gc_wb(self, op): + arglocs = [self.ensure_reg(op.getarg(0))] + return arglocs + + def prepare_cond_call_gc_wb_array(self, op): + arglocs = [self.ensure_reg(op.getarg(0)), + self.ensure_reg_or_16bit_imm(op.getarg(1)), + None] + if arglocs[1].is_reg(): + arglocs[2] = self.get_scratch_reg(INT) + return arglocs + def _prepare_math_sqrt(self, op): loc = self.ensure_reg(op.getarg(1), force_in_reg=True) self.free_op_vars() From pypy.commits at gmail.com Wed Dec 16 10:10:16 2015 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Dec 2015 07:10:16 -0800 (PST) Subject: [pypy-commit] pypy default: More docs for GC_LOAD/GC_STORE. Fix the number of expected args of Message-ID: <56717ed8.617d700a.58754.5c92@mx.google.com> Author: Armin Rigo Branch: Changeset: r81349:b4094f570f3d Date: 2015-12-16 16:09 +0100 http://bitbucket.org/pypy/pypy/changeset/b4094f570f3d/ Log: More docs for GC_LOAD/GC_STORE. Fix the number of expected args of GC_STORE_INDEXED. diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1170,10 +1170,11 @@ 'GC_LOAD/3/rfi', # parameters GC_LOAD_INDEXED # 1: pointer to complex object - # 2: integer describing the offset + # 2: integer describing the index # 3: constant integer scale factor - # 4: constant integer offset + # 4: constant integer base offset (final offset is 'base + scale * index') # 5: constant integer. byte size of datatype to load (negative if it is signed) + # (GC_LOAD is equivalent to GC_LOAD_INDEXED with arg3==1, arg4==0) 'GC_LOAD_INDEXED/5/rfi', '_RAW_LOAD_FIRST', @@ -1204,8 +1205,9 @@ # same paramters as GC_LOAD, but one additional for the value to store # note that the itemsize is not signed! 
+ # (gcptr, index, value, [scale, base_offset,] itemsize) 'GC_STORE/4d/n', - 'GC_STORE_INDEXED/5d/n', + 'GC_STORE_INDEXED/6d/n', 'INCREMENT_DEBUG_COUNTER/1/n', '_RAW_STORE_FIRST', From pypy.commits at gmail.com Wed Dec 16 09:58:08 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 16 Dec 2015 06:58:08 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed issue with gc_store, constant was not pushed to literal pool (did not think that value could be constant) Message-ID: <56717c00.421e190a.7a76a.5a02@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81348:62b16757aa42 Date: 2015-12-16 15:57 +0100 http://bitbucket.org/pypy/pypy/changeset/62b16757aa42/ Log: fixed issue with gc_store, constant was not pushed to literal pool (did not think that value could be constant) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -142,6 +142,7 @@ return startpos def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): + return descr = self.cpu.gc_ll_descr.write_barrier_descr if descr is None: return @@ -276,6 +277,7 @@ # f) store the address of the new jitframe in the shadowstack # c) set the gcmap field to 0 in the new jitframe # g) restore registers and return + return mc = PPCBuilder() self.mc = mc diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -53,6 +53,10 @@ self.reserve_literal(8) return elif opnum == rop.GC_STORE or opnum == rop.GC_STORE_INDEXED: + arg = op.getarg(2) + if arg.is_constant(): + self.offset_map[arg] = self.size + self.reserve_literal(8) return elif opnum in (rop.GC_LOAD_F, rop.GC_LOAD_I, From pypy.commits at gmail.com Wed Dec 16 10:20:25 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Wed, 16 Dec 2015 07:20:25 -0800 (PST) Subject: [pypy-commit] pypy test-AF_NETLINK: Add branch for AF_NETLINK testing Message-ID: <56718139.a810700a.f7316.4d1c@mx.google.com> Author: Vincent Legoll Branch: test-AF_NETLINK Changeset: r81350:5b0cf7f916bb Date: 2015-12-16 13:38 +0100 http://bitbucket.org/pypy/pypy/changeset/5b0cf7f916bb/ Log: Add branch for AF_NETLINK testing From pypy.commits at gmail.com Wed Dec 16 10:27:02 2015 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Dec 2015 07:27:02 -0800 (PST) Subject: [pypy-commit] pypy test-AF_NETLINK: ready to merge Message-ID: <567182c6.d4cd190a.4860d.5ffa@mx.google.com> Author: Armin Rigo Branch: test-AF_NETLINK Changeset: r81352:c8fe39bb292a Date: 2015-12-16 16:22 +0100 http://bitbucket.org/pypy/pypy/changeset/c8fe39bb292a/ Log: ready to merge From pypy.commits at gmail.com Wed Dec 16 10:27:04 2015 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Dec 2015 07:27:04 -0800 (PST) Subject: [pypy-commit] pypy default: hg merge test-AF_NETLINK Message-ID: <567182c8.617d700a.58754.5e09@mx.google.com> Author: Armin Rigo Branch: Changeset: r81353:a58986838a03 Date: 2015-12-16 16:23 +0100 http://bitbucket.org/pypy/pypy/changeset/a58986838a03/ Log: hg merge test-AF_NETLINK Add a test for AF_NETLINK (which is already supported but untested) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -627,6 +627,26 @@ self.foo = _socket.socket() +class AppTestNetlink: + def setup_class(cls): + if not 
hasattr(os, 'getpid'): + py.test.skip("AF_NETLINK needs os.getpid()") + w_ok = space.appexec([], "(): import _socket; " + + "return hasattr(_socket, 'AF_NETLINK')") + if not space.is_true(w_ok): + py.test.skip("no AF_NETLINK on this platform") + cls.space = space + + def test_connect_to_kernel_netlink_routing_socket(self): + import _socket, os + s = _socket.socket(_socket.AF_NETLINK, _socket.SOCK_DGRAM, _socket.NETLINK_ROUTE) + assert s.getsockname() == (0L, 0L) + s.bind((0, 0)) + a, b = s.getsockname() + assert a == os.getpid() + assert b == 0 + + class AppTestPacket: def setup_class(cls): if not hasattr(os, 'getuid') or os.getuid() != 0: From pypy.commits at gmail.com Wed Dec 16 10:27:06 2015 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Dec 2015 07:27:06 -0800 (PST) Subject: [pypy-commit] pypy default: ignore this branch Message-ID: <567182ca.0197700a.c449f.5b28@mx.google.com> Author: Armin Rigo Branch: Changeset: r81354:5b6101c922b5 Date: 2015-12-16 16:24 +0100 http://bitbucket.org/pypy/pypy/changeset/5b6101c922b5/ Log: ignore this branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -79,3 +79,4 @@ Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. .. branch: flowspace-cleanups +.. branch: test-AF_NETLINK From pypy.commits at gmail.com Wed Dec 16 10:33:39 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Wed, 16 Dec 2015 07:33:39 -0800 (PST) Subject: [pypy-commit] pypy small-cleanups-misc: new branch Message-ID: <56718453.a3eb700a.16835.5f2b@mx.google.com> Author: Vincent Legoll Branch: small-cleanups-misc Changeset: r81355:f5c6a4898265 Date: 2015-12-16 13:49 +0100 http://bitbucket.org/pypy/pypy/changeset/f5c6a4898265/ Log: new branch From pypy.commits at gmail.com Wed Dec 16 10:33:46 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Wed, 16 Dec 2015 07:33:46 -0800 (PST) Subject: [pypy-commit] pypy small-cleanups-misc: Fix whitespace Message-ID: <5671845a.4a9c190a.1cd77.6147@mx.google.com> Author: Vincent Legoll Branch: small-cleanups-misc Changeset: r81358:39f43b29dfff Date: 2015-12-14 09:48 +0100 http://bitbucket.org/pypy/pypy/changeset/39f43b29dfff/ Log: Fix whitespace diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -441,7 +441,7 @@ def dict_contains(s_dct, s_element, position): s_dct.dictdef.generalize_key(s_element) if s_dct._is_empty(position): - s_bool =SomeBool() + s_bool = SomeBool() s_bool.const = False return s_bool return s_Bool From pypy.commits at gmail.com Wed Dec 16 10:33:42 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Wed, 16 Dec 2015 07:33:42 -0800 (PST) Subject: [pypy-commit] pypy small-cleanups-misc: Remove stale comment Message-ID: <56718456.122c190a.d7439.5e6a@mx.google.com> Author: Vincent Legoll Branch: small-cleanups-misc Changeset: r81356:a607974f68c6 Date: 2015-12-13 11:16 +0100 http://bitbucket.org/pypy/pypy/changeset/a607974f68c6/ Log: Remove stale comment Ronan Lamy thinks this comment is obsolete (c.f. #2198) Remove it so that newcomers don't start to work on it. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -521,7 +521,6 @@ def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): - # XXX consider to extend rlist's functionality? 
length = self.length() start, stop, step, slicelength = w_index.indices4(space, length) assert slicelength >= 0 From pypy.commits at gmail.com Wed Dec 16 10:33:44 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Wed, 16 Dec 2015 07:33:44 -0800 (PST) Subject: [pypy-commit] pypy small-cleanups-misc: Fix typos in comments Message-ID: <56718458.82ca700a.fae73.5f01@mx.google.com> Author: Vincent Legoll Branch: small-cleanups-misc Changeset: r81357:84c16014baee Date: 2015-12-14 09:47 +0100 http://bitbucket.org/pypy/pypy/changeset/84c16014baee/ Log: Fix typos in comments diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -251,7 +251,7 @@ from pypy.module._socket.interp_socket import addr_as_object if not hasattr(rsocket._c, 'sockaddr_ll'): py.test.skip("posix specific test") - # HACK: To get the correct interface numer of lo, which in most cases is 1, + # HACK: To get the correct interface number of lo, which in most cases is 1, # but can be anything (i.e. 39), we need to call the libc function # if_nametoindex to get the correct index import ctypes @@ -513,7 +513,7 @@ def test_getsetsockopt(self): import _socket as socket import struct - # A socket sould start with reuse == 0 + # A socket should start with reuse == 0 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse == 0 From pypy.commits at gmail.com Wed Dec 16 10:36:38 2015 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Dec 2015 07:36:38 -0800 (PST) Subject: [pypy-commit] pypy small-cleanups-misc: ready to merge Message-ID: <56718506.d587190a.33063.5c00@mx.google.com> Author: Armin Rigo Branch: small-cleanups-misc Changeset: r81359:6a2b78b0e766 Date: 2015-12-16 16:34 +0100 http://bitbucket.org/pypy/pypy/changeset/6a2b78b0e766/ Log: ready to merge diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -79,3 +79,4 @@ Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. .. branch: flowspace-cleanups +.. branch: small-cleanups-misc From pypy.commits at gmail.com Wed Dec 16 10:36:40 2015 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Dec 2015 07:36:40 -0800 (PST) Subject: [pypy-commit] pypy default: hg merge small-cleanups-misc Message-ID: <56718508.421e190a.7a76a.5d3b@mx.google.com> Author: Armin Rigo Branch: Changeset: r81360:82a060cff0e6 Date: 2015-12-16 16:35 +0100 http://bitbucket.org/pypy/pypy/changeset/82a060cff0e6/ Log: hg merge small-cleanups-misc diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -80,3 +80,4 @@ .. branch: flowspace-cleanups .. branch: test-AF_NETLINK +.. branch: small-cleanups-misc diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -251,7 +251,7 @@ from pypy.module._socket.interp_socket import addr_as_object if not hasattr(rsocket._c, 'sockaddr_ll'): py.test.skip("posix specific test") - # HACK: To get the correct interface numer of lo, which in most cases is 1, + # HACK: To get the correct interface number of lo, which in most cases is 1, # but can be anything (i.e. 
39), we need to call the libc function # if_nametoindex to get the correct index import ctypes @@ -513,7 +513,7 @@ def test_getsetsockopt(self): import _socket as socket import struct - # A socket sould start with reuse == 0 + # A socket should start with reuse == 0 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse == 0 diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -521,7 +521,6 @@ def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): - # XXX consider to extend rlist's functionality? length = self.length() start, stop, step, slicelength = w_index.indices4(space, length) assert slicelength >= 0 diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -441,7 +441,7 @@ def dict_contains(s_dct, s_element, position): s_dct.dictdef.generalize_key(s_element) if s_dct._is_empty(position): - s_bool =SomeBool() + s_bool = SomeBool() s_bool.const = False return s_bool return s_Bool From pypy.commits at gmail.com Wed Dec 16 10:20:27 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Wed, 16 Dec 2015 07:20:27 -0800 (PST) Subject: [pypy-commit] pypy test-AF_NETLINK: Add a test for AF_NETLINK sockets Message-ID: <5671813b.29da700a.7af33.5c35@mx.google.com> Author: Vincent Legoll Branch: test-AF_NETLINK Changeset: r81351:ddec8c4b70b1 Date: 2015-12-16 13:41 +0100 http://bitbucket.org/pypy/pypy/changeset/ddec8c4b70b1/ Log: Add a test for AF_NETLINK sockets This was asked for by Armin Rigo in Issue #1942 diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -627,6 +627,26 @@ self.foo = _socket.socket() +class AppTestNetlink: + def setup_class(cls): + if not hasattr(os, 'getpid'): + py.test.skip("AF_NETLINK needs os.getpid()") + w_ok = space.appexec([], "(): import _socket; " + + "return hasattr(_socket, 'AF_NETLINK')") + if not space.is_true(w_ok): + py.test.skip("no AF_NETLINK on this platform") + cls.space = space + + def test_connect_to_kernel_netlink_routing_socket(self): + import _socket, os + s = _socket.socket(_socket.AF_NETLINK, _socket.SOCK_DGRAM, _socket.NETLINK_ROUTE) + assert s.getsockname() == (0L, 0L) + s.bind((0, 0)) + a, b = s.getsockname() + assert a == os.getpid() + assert b == 0 + + class AppTestPacket: def setup_class(cls): if not hasattr(os, 'getuid') or os.getuid() != 0: From pypy.commits at gmail.com Wed Dec 16 10:41:17 2015 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Dec 2015 07:41:17 -0800 (PST) Subject: [pypy-commit] pypy default: PR #374 Message-ID: <5671861d.8689190a.1f383.5cd5@mx.google.com> Author: Armin Rigo Branch: Changeset: r81361:781195be4fc9 Date: 2015-12-16 16:40 +0100 http://bitbucket.org/pypy/pypy/changeset/781195be4fc9/ Log: PR #374 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -79,5 +79,8 @@ Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. .. branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + .. branch: test-AF_NETLINK .. 
branch: small-cleanups-misc From pypy.commits at gmail.com Wed Dec 16 11:22:46 2015 From: pypy.commits at gmail.com (rlamy) Date: Wed, 16 Dec 2015 08:22:46 -0800 (PST) Subject: [pypy-commit] pypy default: Add explicit casts to untranslated make_stat_result() and make_statvfs_result() Message-ID: <56718fd6.90a2190a.20405.62f4@mx.google.com> Author: Ronan Lamy Branch: Changeset: r81362:d1769628ff5d Date: 2015-12-16 16:21 +0000 http://bitbucket.org/pypy/pypy/changeset/d1769628ff5d/ Log: Add explicit casts to untranslated make_stat_result() and make_statvfs_result() diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -169,10 +169,12 @@ def make_stat_result(tup): """Turn a tuple into an os.stat_result object.""" - positional = tup[:N_INDEXABLE_FIELDS] + positional = tuple( + lltype.cast_primitive(TYPE, value) for value, (name, TYPE) in + zip(tup, STAT_FIELDS)[:N_INDEXABLE_FIELDS]) kwds = {} - for i, name in enumerate(STAT_FIELD_NAMES[N_INDEXABLE_FIELDS:]): - kwds[name] = tup[N_INDEXABLE_FIELDS + i] + for value, (name, TYPE) in zip(tup, STAT_FIELDS)[N_INDEXABLE_FIELDS:]: + kwds[name] = lltype.cast_primitive(TYPE, value) return os.stat_result(positional, kwds) @@ -221,8 +223,6 @@ class StatvfsResultRepr(Repr): def __init__(self, rtyper): self.rtyper = rtyper - self.statvfs_fields = STATVFS_FIELDS - self.statvfs_field_indexes = {} for i, (name, TYPE) in enumerate(STATVFS_FIELDS): self.statvfs_field_indexes[name] = i @@ -261,10 +261,11 @@ return r_sta.redispatch_getfield(hop, index) - def make_statvfs_result(tup): - return os.statvfs_result(tup) - + args = tuple( + lltype.cast_primitive(TYPE, value) for value, (name, TYPE) in + zip(tup, STATVFS_FIELDS)) + return os.statvfs_result(args) class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry): _about_ = make_statvfs_result From pypy.commits at gmail.com Wed Dec 16 13:20:38 2015 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Dec 2015 10:20:38 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5671ab76.0292700a.2236b.6aeb@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r677:663f0376570f Date: 2015-12-16 19:20 +0100 http://bitbucket.org/pypy/pypy.org/changeset/663f0376570f/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $61477 of $105000 (58.5%) + $61586 of $105000 (58.7%)
@@ -23,7 +23,7 @@
    •   • From pypy.commits at gmail.com Wed Dec 16 09:23:40 2015 From: pypy.commits at gmail.com (pypy project) Date: Wed, 16 Dec 2015 15:23:40 +0100 Subject: [pypy-commit] this is another test Message-ID: please show up From pypy.commits at gmail.com Wed Dec 16 17:07:32 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Wed, 16 Dec 2015 14:07:32 -0800 (PST) Subject: [pypy-commit] pypy fix-2198: Fix translation Message-ID: <5671e0a4.a83d700a.49d2e.7a58@mx.google.com> Author: Vincent Legoll Branch: fix-2198 Changeset: r81363:72204e7474a5 Date: 2015-12-16 13:28 +0100 http://bitbucket.org/pypy/pypy/changeset/72204e7474a5/ Log: Fix translation diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1473,14 +1473,17 @@ return # shortcut, really there's nothing to do items = self.unerase(w_list.lstorage) if step == 1: # Support list resizing for non-extended slices - delta = slicelength - len2 - if delta > 0: - # start < 0 is only possible with slicelength == 0 - assert start >= 0 - if len2 == 0: - del items[start:start + delta] - return # shortcut, we already did all that was needed - items[start:start + slicelength] = self.unerase(w_other.lstorage) + len1 = w_list.length() + # Ensure non-negative slicing + if start <= -len1: + start = 0 + elif start < 0: + start += len1 + assert start >= 0 + if len2 == 0 and slicelength > 0: # shortcut, we already did all that was needed + del items[start:start + slicelength] + else: + items[start:start + slicelength] = self.unerase(w_other.lstorage) return elif len2 != slicelength: # No resize for extended slices raise oefmt(self.space.w_ValueError, @@ -1494,9 +1497,18 @@ # self.unerase is valid for both of them other_items = self.unerase(w_other.lstorage) if other_items is items: - if step > 0: - items[start:start + slicelength:step] = other_items + if step > 1: + # Always copy starting from the right to avoid + # having to make a shallow copy in the case where + # the source and destination lists are the same list. 
      
+ i = len2 - 1 + start += i * step + while i >= 0: + items[start] = other_items[i] + start -= step + i -= 1 else: # step can only be -1 here, so it's equivalent to : + assert step == -1 w_list.reverse() return for i in range(len2): From pypy.commits at gmail.com Wed Dec 16 19:27:55 2015 From: pypy.commits at gmail.com (pjenvey) Date: Wed, 16 Dec 2015 16:27:55 -0800 (PST) Subject: [pypy-commit] pypy default: fix for BSDs lacking pty.h Message-ID: <5672018b.cd1d190a.3dc60.ffff8114@mx.google.com> Author: Philip Jenvey Branch: Changeset: r81364:b3e7453fb7e3 Date: 2015-12-16 16:27 -0800 http://bitbucket.org/pypy/pypy/changeset/b3e7453fb7e3/ Log: fix for BSDs lacking pty.h diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -234,10 +234,16 @@ includes = ['io.h', 'sys/utime.h', 'sys/types.h'] libraries = [] else: + if sys.platform.startswith(('darwin', 'netbsd', 'openbsd')): + _ptyh = 'util.h' + elif sys.platform.startswith('freebsd'): + _ptyh = 'libutil.h' + else: + _ptyh = 'pty.h' includes = ['unistd.h', 'sys/types.h', 'sys/wait.h', 'utime.h', 'sys/time.h', 'sys/times.h', 'grp.h', 'dirent.h', 'sys/stat.h', 'fcntl.h', - 'signal.h', 'pty.h', 'sys/utsname.h'] + 'signal.h', 'sys/utsname.h', _ptyh] libraries = ['util'] eci = ExternalCompilationInfo( includes=includes, From pypy.commits at gmail.com Thu Dec 17 03:21:20 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 00:21:20 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56727080.57e41c0a.d4b1a.5f95@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r678:83ff69e2efd0 Date: 2015-12-17 09:21 +0100 http://bitbucket.org/pypy/pypy.org/changeset/83ff69e2efd0/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $61586 of $105000 (58.7%) + $61600 of $105000 (58.7%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Thu Dec 17 05:27:02 2015 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 17 Dec 2015 02:27:02 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: pushed forward cond_call_gc_wb, two more tests now passing Message-ID: <56728df6.a9c8c20a.87520.ffff8b37@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81365:9dfc528a3994 Date: 2015-12-17 11:26 +0100 http://bitbucket.org/pypy/pypy/changeset/9dfc528a3994/ Log: pushed forward cond_call_gc_wb, two more tests now passing diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -42,6 +42,7 @@ self.loop_run_counters = [] self.gcrootmap_retaddr_forced = 0 self.failure_recovery_code = [0, 0, 0, 0] + self.wb_slowpath = [0,0,0,0,0] def setup(self, looptoken): BaseAssembler.setup(self, looptoken) @@ -142,7 +143,6 @@ return startpos def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): - return descr = self.cpu.gc_ll_descr.write_barrier_descr if descr is None: return @@ -164,10 +164,15 @@ else: argument_loc = r.r0 - mc = PPCBuilder() + mc = InstrBuilder() old_mc = self.mc self.mc = mc + + # save the information + mc.STG(r.r14, l.addr(14*WORD, r.SP)) + # no need to store the back chain ? mc.STG(r.SP, l.addr(0, r.SP)) # store the backchain + LOCAL_VARS_OFFSET = 0 extra_stack_size = LOCAL_VARS_OFFSET + 4 * WORD + 8 extra_stack_size = (extra_stack_size + 15) & ~15 if for_frame: @@ -179,28 +184,29 @@ # need to save many registers: the registers that are anyway # destroyed by the call can be ignored (VOLATILES), and the # non-volatile registers won't be changed here. It only needs - # to save r.RCS1 (used below), r3 and f1 (possible results of + # to save r.RCS1 (used below), r1 and f0 (possible results of # the call), and two more non-volatile registers (used to store # the RPython exception that occurred in the CALL, if any). # # We need to increase our stack frame size a bit to store them. 
# - self.mc.load(r.SCRATCH.value, r.SP.value, 0) # SP back chain - self.mc.store_update(r.SCRATCH.value, r.SP.value, -extra_stack_size) - self.mc.std(r.RCS1.value, r.SP.value, LOCAL_VARS_OFFSET + 0 * WORD) - self.mc.std(r.RCS2.value, r.SP.value, LOCAL_VARS_OFFSET + 1 * WORD) - self.mc.std(r.RCS3.value, r.SP.value, LOCAL_VARS_OFFSET + 2 * WORD) - self.mc.std(r.r3.value, r.SP.value, LOCAL_VARS_OFFSET + 3 * WORD) - self.mc.stfd(r.f1.value, r.SP.value, LOCAL_VARS_OFFSET + 4 * WORD) + self.mc.TRAP2() + #self.mc.LGR(r.SCRATCH, l.addr(0,r.SP)) # SP back chain + #self.mc.STG(r.SCRATCH, l.addr(-extra_stack_size, r.SP.value)) + #self.mc.STG(r.RCS1.value, r.SP.value, LOCAL_VARS_OFFSET + 0 * WORD) + #self.mc.STG(r.RCS2.value, r.SP.value, LOCAL_VARS_OFFSET + 1 * WORD) + #self.mc.STG(r.RCS3.value, r.SP.value, LOCAL_VARS_OFFSET + 2 * WORD) + #self.mc.STG(r.r2.value, r.SP.value, LOCAL_VARS_OFFSET + 3 * WORD) + #self.mc.STD(r.f1.value, r.SP.value, LOCAL_VARS_OFFSET + 4 * WORD) saved_regs = None saved_fp_regs = None else: # push all volatile registers, push RCS1, and sometimes push RCS2 if withcards: - saved_regs = r.VOLATILES + [r.RCS1, r.RCS2] + saved_regs = r.VOLATILES # + [r.RCS1, r.RCS2] else: - saved_regs = r.VOLATILES + [r.RCS1] + saved_regs = r.VOLATILES # + [r.RCS1] if withfloats: saved_fp_regs = r.MANAGED_FP_REGS else: @@ -216,23 +222,29 @@ # of _reload_frame_if_necessary) # This trashes r0 and r2, which is fine in this case assert argument_loc is not r.r0 - self._store_and_reset_exception(mc, r.RCS2, r.RCS3) + # XXX TODO + #self._store_and_reset_exception(mc, r.RCS2, r.RCS3) if withcards: - mc.mr(r.RCS2.value, argument_loc.value) + # XXX TODO + pass + #kmc.mr(r.RCS2.value, argument_loc.value) # # Save the lr into r.RCS1 - mc.mflr(r.RCS1.value) + #mc.mflr(r.RCS1.value) # func = rffi.cast(lltype.Signed, func) # Note: if not 'for_frame', argument_loc is r0, which must carefully # not be overwritten above - mc.mr(r.r3.value, argument_loc.value) + mc.STG(r.SP, l.addr(0, r.SP)) # store the backchain + mc.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) mc.load_imm(mc.RAW_CALL_REG, func) + mc.LGR(r.r2, argument_loc) mc.raw_call() + mc.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) # # Restore lr - mc.mtlr(r.RCS1.value) + # TODO mc.mtlr(r.RCS1.value) if for_frame: self._restore_exception(mc, r.RCS2, r.RCS3) @@ -242,22 +254,26 @@ # not follow this instruction with another one that changes # the status of cr0! 
card_marking_mask = descr.jit_wb_cards_set_singlebyte - mc.lbz(r.RCS2.value, r.RCS2.value, descr.jit_wb_if_flag_byteofs) - mc.andix(r.RCS2.value, r.RCS2.value, card_marking_mask & 0xFF) + mc.trap() + #mc.lbz(r.RCS2.value, r.RCS2.value, descr.jit_wb_if_flag_byteofs) + #mc.andix(r.RCS2.value, r.RCS2.value, card_marking_mask & 0xFF) if for_frame: - self.mc.ld(r.RCS1.value, r.SP.value, LOCAL_VARS_OFFSET + 0 * WORD) - self.mc.ld(r.RCS2.value, r.SP.value, LOCAL_VARS_OFFSET + 1 * WORD) - self.mc.ld(r.RCS3.value, r.SP.value, LOCAL_VARS_OFFSET + 2 * WORD) - self.mc.ld(r.r3.value, r.SP.value, LOCAL_VARS_OFFSET + 3 * WORD) - self.mc.lfd(r.f1.value, r.SP.value, LOCAL_VARS_OFFSET + 4 * WORD) - self.mc.addi(r.SP.value, r.SP.value, extra_stack_size) + self.mc.trap() + #self.mc.ld(r.RCS1.value, r.SP.value, LOCAL_VARS_OFFSET + 0 * WORD) + #self.mc.ld(r.RCS2.value, r.SP.value, LOCAL_VARS_OFFSET + 1 * WORD) + #self.mc.ld(r.RCS3.value, r.SP.value, LOCAL_VARS_OFFSET + 2 * WORD) + #self.mc.ld(r.r3.value, r.SP.value, LOCAL_VARS_OFFSET + 3 * WORD) + #self.mc.lfd(r.f1.value, r.SP.value, LOCAL_VARS_OFFSET + 4 * WORD) + #self.mc.addi(r.SP.value, r.SP.value, extra_stack_size) else: self._pop_core_regs_from_jitframe(mc, saved_regs) self._pop_fp_regs_from_jitframe(mc, saved_fp_regs) - mc.blr() + mc.LG(r.r14, l.addr(14*WORD, r.SP)) + mc.BCR(c.ANY, r.RETURN) + #mc.blr() self.mc = old_mc rawstart = mc.materialize(self.cpu, []) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -65,6 +65,8 @@ class InstrBuilder(BlockBuilderMixin, AbstractZARCHBuilder): + RAW_CALL_REG = r.r14 + def __init__(self): AbstractZARCHBuilder.__init__(self) self.init_block_builder() diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -1,4 +1,6 @@ from rpython.jit.backend.llsupport.jump import remap_frame_layout +from rpython.jit.backend.zarch.arch import (WORD, + STD_FRAME_SIZE_IN_BYTES) from rpython.jit.backend.zarch.arch import THREADLOCAL_ADDR_OFFSET from rpython.jit.backend.zarch.helper.assembler import (gen_emit_cmp_op, gen_emit_rr_or_rpool, gen_emit_shift, gen_emit_pool_or_rr_evenodd, @@ -410,12 +412,12 @@ if is_frame: assert loc_base is r.SPP assert check_imm_value(descr.jit_wb_if_flag_byteofs) - mc.LGB(r.SCRATCH2, l.addr(descr.jit_wb_if_flag_byteofs, loc_base)) + mc.LLGC(r.SCRATCH2, l.addr(descr.jit_wb_if_flag_byteofs, loc_base)) mc.LGR(r.SCRATCH, r.SCRATCH2) mc.NILL(r.SCRATCH, l.imm(mask & 0xFF)) jz_location = mc.get_relative_pos() - mc.trap() # patched later with 'beq' + mc.trap() # patched later with 'EQ' mc.write('\x00' * 4) # for cond_call_gc_wb_array, also add another fast path: @@ -425,7 +427,7 @@ mc.LGR(r.SCRATCH, r.SCRATCH2) mc.NILL(r.SCRATCH, l.imm(card_marking_mask & 0xFF)) js_location = mc.get_relative_pos() - mc.trap() # patched later with 'bne' + mc.trap() # patched later with 'NE' mc.write('\x00' * 4) else: js_location = 0 @@ -447,10 +449,14 @@ assert self.wb_slowpath[helper_num] != 0 # if not is_frame: - mc.mr(r.r0.value, loc_base.value) # unusual argument location - mc.load_imm(r.SCRATCH2, self.wb_slowpath[helper_num]) - mc.mtctr(r.SCRATCH2.value) - mc.bctrl() + mc.LGR(r.r0, loc_base) # unusual argument location + + mc.load_imm(r.r14, self.wb_slowpath[helper_num]) + # alloc a stack frame + mc.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) + 
mc.BASR(r.r14, r.r14) + # destory the frame + mc.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) if card_marking_mask: # The helper ends again with a check of the flag in the object. @@ -458,18 +464,19 @@ # taken if GCFLAG_CARDS_SET is still not set. jns_location = mc.get_relative_pos() mc.trap() + mc.write('\x00'*4) # - # patch the 'bne' above + # patch the 'NE' above currpos = mc.currpos() pmc = OverwritingBuilder(mc, js_location, 1) - pmc.bne(currpos - js_location) + pmc.BRCL(c.NE, l.imm(currpos - js_location)) pmc.overwrite() # # case GCFLAG_CARDS_SET: emit a few instructions to do # directly the card flag setting loc_index = arglocs[1] if loc_index.is_reg(): - + xxx tmp_loc = arglocs[2] n = descr.jit_wb_card_page_shift @@ -505,13 +512,13 @@ # patch the beq just above currpos = mc.currpos() pmc = OverwritingBuilder(mc, jns_location, 1) - pmc.beq(currpos - jns_location) + pmc.BRCL(c.EQ, l.imm(currpos - jns_location)) pmc.overwrite() # patch the JZ above currpos = mc.currpos() pmc = OverwritingBuilder(mc, jz_location, 1) - pmc.beq(currpos - jz_location) + pmc.BRCL(c.EQ, l.imm(currpos - jz_location)) pmc.overwrite() def emit_cond_call_gc_wb(self, op, arglocs, regalloc): From pypy.commits at gmail.com Thu Dec 17 06:54:26 2015 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 17 Dec 2015 03:54:26 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: implementing card_masking_mask assembler in _write_barrier_fast_path Message-ID: <5672a272.913bc20a.d29ab.ffffaa1a@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81366:a74573715f49 Date: 2015-12-17 12:53 +0100 http://bitbucket.org/pypy/pypy/changeset/a74573715f49/ Log: implementing card_masking_mask assembler in _write_barrier_fast_path diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -169,8 +169,7 @@ self.mc = mc # save the information - mc.STG(r.r14, l.addr(14*WORD, r.SP)) - # no need to store the back chain ? 
mc.STG(r.SP, l.addr(0, r.SP)) # store the backchain + mc.STG(r.r14, l.addr(14*WORD, r.SP)) # save the link LOCAL_VARS_OFFSET = 0 extra_stack_size = LOCAL_VARS_OFFSET + 4 * WORD + 8 @@ -222,12 +221,11 @@ # of _reload_frame_if_necessary) # This trashes r0 and r2, which is fine in this case assert argument_loc is not r.r0 - # XXX TODO + xxx #self._store_and_reset_exception(mc, r.RCS2, r.RCS3) if withcards: - # XXX TODO - pass + xxx #kmc.mr(r.RCS2.value, argument_loc.value) # # Save the lr into r.RCS1 @@ -242,11 +240,9 @@ mc.LGR(r.r2, argument_loc) mc.raw_call() mc.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) - # - # Restore lr - # TODO mc.mtlr(r.RCS1.value) if for_frame: + xxx self._restore_exception(mc, r.RCS2, r.RCS3) if withcards: @@ -271,9 +267,8 @@ self._pop_core_regs_from_jitframe(mc, saved_regs) self._pop_fp_regs_from_jitframe(mc, saved_fp_regs) - mc.LG(r.r14, l.addr(14*WORD, r.SP)) + mc.LG(r.r14, l.addr(14*WORD, r.SP)) # restore the link mc.BCR(c.ANY, r.RETURN) - #mc.blr() self.mc = old_mc rawstart = mc.materialize(self.cpu, []) diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -322,6 +322,18 @@ self.writechar(opcode2) return encode_rie_e +def build_rie_f(mnemonic, (opcode1,opcode2)): + @builder.arguments('r,r,i8,i8,i8') + def encode_rie_f(self, reg1, reg2, i1, i2, i3): + self.writechar(opcode1) + byte = (reg1 & BIT_MASK_4) << 4 | (reg2 & BIT_MASK_4) + self.writechar(chr(byte)) + self.writechar(chr(i1)) + self.writechar(chr(i2)) + self.writechar(chr(i3)) + self.writechar(opcode2) + return encode_rie_f + def build_rie_a(mnemonic, (opcode1,opcode2)): br = is_branch_relative(mnemonic) @builder.arguments('r,i16,r/m') diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -39,6 +39,10 @@ 'SRLG': ('rsy_a', ['\xEB','\x0C']), 'SLLG': ('rsy_a', ['\xEB','\x0D']), + # rotating + # rotate, then insert selected bits + 'RISBGN': ('rie_f', ['\xEC','\x59']), + # invert & negative & absolute 'LPGR': ('rre', ['\xB9','\x00']), 'LNGR': ('rre', ['\xB9','\x01']), diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -476,27 +476,32 @@ # directly the card flag setting loc_index = arglocs[1] if loc_index.is_reg(): - xxx tmp_loc = arglocs[2] n = descr.jit_wb_card_page_shift # compute in tmp_loc the byte offset: # ~(index >> (card_page_shift + 3)) ('~' is 'not_' below) - mc.srli_op(tmp_loc.value, loc_index.value, n + 3) + mc.SRAG(tmp_loc, loc_index, l.addr(n+3)) + #mc.srli_op(tmp_loc.value, loc_index.value, n + 3) + # invert the bits + mc.XIHF(tmp_loc, l.imm(0xffffFFFF)) + mc.XILF(tmp_loc, l.imm(0xffffFFFF)) # compute in r2 the index of the bit inside the byte: # (index >> card_page_shift) & 7 - mc.rldicl(r.SCRATCH2.value, loc_index.value, 64 - n, 61) - mc.li(r.SCRATCH.value, 1) - mc.not_(tmp_loc.value, tmp_loc.value) + # 0x80 sets zero flag. 
will store 0 into all selected bits + mc.RISBGN(r.SCRATCH2, loc_index, l.imm(3), l.imm(0x80 | 63), l.imm(61)) + #mc.rldicl(r.SCRATCH2.value, loc_index.value, 64 - n, 61) # set r2 to 1 << r2 - mc.sl_op(r.SCRATCH2.value, r.SCRATCH.value, r.SCRATCH2.value) + mc.LGHI(r.SCRATCH, l.imm(1)) + mc.SLAG(r.SCRATCH2, r.SCRATCH, l.addr(0,r.SCRATCH2)) # set this bit inside the byte of interest - mc.lbzx(r.SCRATCH.value, loc_base.value, tmp_loc.value) - mc.or_(r.SCRATCH.value, r.SCRATCH.value, r.SCRATCH2.value) - mc.stbx(r.SCRATCH.value, loc_base.value, tmp_loc.value) + addr = l.addr(0, loc_base, tmp_loc) + mc.LLGC(r.SCRATCH, addr) + mc.OGR(r.SCRATCH, r.SCRATCH2) + mc.STCY(r.SCRATCH, addr) # done else: @@ -505,9 +510,10 @@ byte_val = 1 << (byte_index & 7) assert check_imm_value(byte_ofs) - mc.lbz(r.SCRATCH.value, loc_base.value, byte_ofs) - mc.ori(r.SCRATCH.value, r.SCRATCH.value, byte_val) - mc.stb(r.SCRATCH.value, loc_base.value, byte_ofs) + addr = l.addr(byte_ofs, loc_base) + mc.LLGC(r.SCRATCH, addr) + mc.OILL(r.SCRATCH, l.imm(byte_val)) + mc.STCY(r.SCRATCH, addr) # # patch the beq just above currpos = mc.currpos() From pypy.commits at gmail.com Thu Dec 17 07:50:27 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 04:50:27 -0800 (PST) Subject: [pypy-commit] cffi default: fixes Message-ID: <5672af93.8673c20a.386b4.ffffbb6d@mx.google.com> Author: Armin Rigo Branch: Changeset: r2481:935bf011a8cf Date: 2015-12-17 13:50 +0100 http://bitbucket.org/cffi/cffi/changeset/935bf011a8cf/ Log: fixes diff --git a/c/commontypes.c b/c/commontypes.c --- a/c/commontypes.c +++ b/c/commontypes.c @@ -199,7 +199,8 @@ static PyObject *b__get_common_types(PyObject *self, PyObject *arg) { - int i, err; + int err; + size_t i; for (i = 0; i < num_common_simple_types; i++) { const char *s = common_simple_types[i]; PyObject *o = PyText_FromString(s + strlen(s) + 1); diff --git a/c/misc_thread_posix.h b/c/misc_thread_posix.h --- a/c/misc_thread_posix.h +++ b/c/misc_thread_posix.h @@ -100,6 +100,17 @@ #endif +/* Seems that CPython 3.5 made our job harder. Did not find out how + to do that without these hacks. We can't use PyThreadState_GET(), + because that calls PyThreadState_Get() which fails an assert if the + result is NULL. 
*/ +#if (PY_MAJOR_VERSION * 1000 + PY_MINOR_VERSION) >= 3005 +void *volatile _PyThreadState_Current; + /* XXX simple volatile access is assumed atomic */ +# define _Py_atomic_load_relaxed(pp) (*(pp)) +#endif + + static PyThreadState *get_current_ts(void) { #if PY_MAJOR_VERSION >= 3 From pypy.commits at gmail.com Thu Dec 17 08:19:28 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 05:19:28 -0800 (PST) Subject: [pypy-commit] cffi release-1.4: make release branch Message-ID: <5672b660.043fc20a.bab5f.ffffc2aa@mx.google.com> Author: Armin Rigo Branch: release-1.4 Changeset: r2482:052ec14c86c3 Date: 2015-12-17 13:54 +0100 http://bitbucket.org/cffi/cffi/changeset/052ec14c86c3/ Log: make release branch From pypy.commits at gmail.com Thu Dec 17 09:29:23 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 06:29:23 -0800 (PST) Subject: [pypy-commit] cffi release-1.4: md5/sha1 Message-ID: <5672c6c3.d10d1c0a.d5402.ffffde57@mx.google.com> Author: Armin Rigo Branch: release-1.4 Changeset: r2483:532cee0a744a Date: 2015-12-17 15:29 +0100 http://bitbucket.org/cffi/cffi/changeset/532cee0a744a/ Log: md5/sha1 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.4.0.tar.gz - - MD5: ... + - MD5: 270d09b3c45851df3478bf5fffbad6be - - SHA: ... + - SHA: 589f3a256cfc7826c1e765fea20d67cf63d70dd5 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Thu Dec 17 09:29:40 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 06:29:40 -0800 (PST) Subject: [pypy-commit] cffi default: hg merge release-1.4 Message-ID: <5672c6d4.a756c20a.3204f.ffffdff8@mx.google.com> Author: Armin Rigo Branch: Changeset: r2484:0289deec3d92 Date: 2015-12-17 15:29 +0100 http://bitbucket.org/cffi/cffi/changeset/0289deec3d92/ Log: hg merge release-1.4 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.4.0.tar.gz - - MD5: ... + - MD5: 270d09b3c45851df3478bf5fffbad6be - - SHA: ... + - SHA: 589f3a256cfc7826c1e765fea20d67cf63d70dd5 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Thu Dec 17 10:19:46 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 07:19:46 -0800 (PST) Subject: [pypy-commit] cffi default: grumble, the disappearance of _Py_atomic_load_relaxed and Message-ID: <5672d292.a415c20a.4d933.ffffee98@mx.google.com> Author: Armin Rigo Branch: Changeset: r2485:d6943ffe2fe8 Date: 2015-12-17 16:19 +0100 http://bitbucket.org/cffi/cffi/changeset/d6943ffe2fe8/ Log: grumble, the disappearance of _Py_atomic_load_relaxed and _PyThreadState_Current occurred in 3.5.1, not in 3.5. diff --git a/c/misc_thread_posix.h b/c/misc_thread_posix.h --- a/c/misc_thread_posix.h +++ b/c/misc_thread_posix.h @@ -100,11 +100,11 @@ #endif -/* Seems that CPython 3.5 made our job harder. Did not find out how +/* Seems that CPython 3.5.1 made our job harder. Did not find out how to do that without these hacks. We can't use PyThreadState_GET(), because that calls PyThreadState_Get() which fails an assert if the result is NULL. 
*/ -#if (PY_MAJOR_VERSION * 1000 + PY_MINOR_VERSION) >= 3005 +#ifndef _Py_atomic_load_relaxed /* this was abruptly un-defined in 3.5.1 */ void *volatile _PyThreadState_Current; /* XXX simple volatile access is assumed atomic */ # define _Py_atomic_load_relaxed(pp) (*(pp)) From pypy.commits at gmail.com Thu Dec 17 10:40:49 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 07:40:49 -0800 (PST) Subject: [pypy-commit] cffi default: Document 1.4.1 Message-ID: <5672d781.2457c20a.d9372.fffffa04@mx.google.com> Author: Armin Rigo Branch: Changeset: r2486:0e3ade655540 Date: 2015-12-17 16:26 +0100 http://bitbucket.org/cffi/cffi/changeset/0e3ade655540/ Log: Document 1.4.1 diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,6 +3,16 @@ ====================== +v1.4.1 +====== + +* Fix the compilation failure of cffi on CPython 3.5.0. (3.5.1 works; + some detail changed that makes some underscore-starting macros + disappear from view of extension modules, and I worked around it, + thinking it changed in all 3.5 versions---but no: it was only in + 3.5.1.) + + v1.4.0 ====== From pypy.commits at gmail.com Thu Dec 17 10:40:51 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 07:40:51 -0800 (PST) Subject: [pypy-commit] cffi release-1.4: hg merge default Message-ID: <5672d783.87de1c0a.ab36d.fffff44f@mx.google.com> Author: Armin Rigo Branch: release-1.4 Changeset: r2487:77c28957c976 Date: 2015-12-17 16:26 +0100 http://bitbucket.org/cffi/cffi/changeset/77c28957c976/ Log: hg merge default diff --git a/c/misc_thread_posix.h b/c/misc_thread_posix.h --- a/c/misc_thread_posix.h +++ b/c/misc_thread_posix.h @@ -100,11 +100,11 @@ #endif -/* Seems that CPython 3.5 made our job harder. Did not find out how +/* Seems that CPython 3.5.1 made our job harder. Did not find out how to do that without these hacks. We can't use PyThreadState_GET(), because that calls PyThreadState_Get() which fails an assert if the result is NULL. */ -#if (PY_MAJOR_VERSION * 1000 + PY_MINOR_VERSION) >= 3005 +#ifndef _Py_atomic_load_relaxed /* this was abruptly un-defined in 3.5.1 */ void *volatile _PyThreadState_Current; /* XXX simple volatile access is assumed atomic */ # define _Py_atomic_load_relaxed(pp) (*(pp)) diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,6 +3,16 @@ ====================== +v1.4.1 +====== + +* Fix the compilation failure of cffi on CPython 3.5.0. (3.5.1 works; + some detail changed that makes some underscore-starting macros + disappear from view of extension modules, and I worked around it, + thinking it changed in all 3.5 versions---but no: it was only in + 3.5.1.) 
+ + v1.4.0 ====== From pypy.commits at gmail.com Thu Dec 17 10:40:56 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 07:40:56 -0800 (PST) Subject: [pypy-commit] cffi default: hg merge release-1.4 Message-ID: <5672d788.e16ec20a.f0264.ffffc3b8@mx.google.com> Author: Armin Rigo Branch: Changeset: r2490:3c9573e3475c Date: 2015-12-17 16:40 +0100 http://bitbucket.org/cffi/cffi/changeset/3c9573e3475c/ Log: hg merge release-1.4 diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -6498,7 +6498,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("1.4.0"); + v = PyText_FromString("1.4.1"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/misc_thread_posix.h b/c/misc_thread_posix.h --- a/c/misc_thread_posix.h +++ b/c/misc_thread_posix.h @@ -104,7 +104,8 @@ to do that without these hacks. We can't use PyThreadState_GET(), because that calls PyThreadState_Get() which fails an assert if the result is NULL. */ -#ifndef _Py_atomic_load_relaxed /* this was abruptly un-defined in 3.5.1 */ +#if PY_MAJOR_VERSION >= 3 && !defined(_Py_atomic_load_relaxed) + /* this was abruptly un-defined in 3.5.1 */ void *volatile _PyThreadState_Current; /* XXX simple volatile access is assumed atomic */ # define _Py_atomic_load_relaxed(pp) (*(pp)) diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.0" -__version_info__ = (1, 4, 0) +__version__ = "1.4.1" +__version_info__ = (1, 4, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '1.4' # The full version, including alpha/beta/rc tags. -release = '1.4.0' +release = '1.4.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,11 +51,11 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.0.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.1.tar.gz - - MD5: 270d09b3c45851df3478bf5fffbad6be + - MD5: 73c2047f598ac7d8b7a5cd8e6d835c42 - - SHA: 589f3a256cfc7826c1e765fea20d67cf63d70dd5 + - SHA: 0a00384281bca841380766b0b41087d105e428a5 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ `Mailing list `_ """, - version='1.4.0', + version='1.4.1', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h']} if cpython else {}, From pypy.commits at gmail.com Thu Dec 17 10:40:53 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 07:40:53 -0800 (PST) Subject: [pypy-commit] cffi release-1.4: bump version number Message-ID: <5672d785.a658c20a.49ee6.fffff7c0@mx.google.com> Author: Armin Rigo Branch: release-1.4 Changeset: r2488:27015e9143b8 Date: 2015-12-17 16:30 +0100 http://bitbucket.org/cffi/cffi/changeset/27015e9143b8/ Log: bump version number diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -6498,7 +6498,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("1.4.0"); + v = PyText_FromString("1.4.1"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/misc_thread_posix.h b/c/misc_thread_posix.h --- a/c/misc_thread_posix.h +++ b/c/misc_thread_posix.h @@ -104,7 +104,8 @@ to do that without these hacks. We can't use PyThreadState_GET(), because that calls PyThreadState_Get() which fails an assert if the result is NULL. */ -#ifndef _Py_atomic_load_relaxed /* this was abruptly un-defined in 3.5.1 */ +#if PY_MAJOR_VERSION >= 3 && !defined(_Py_atomic_load_relaxed) + /* this was abruptly un-defined in 3.5.1 */ void *volatile _PyThreadState_Current; /* XXX simple volatile access is assumed atomic */ # define _Py_atomic_load_relaxed(pp) (*(pp)) diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.0" -__version_info__ = (1, 4, 0) +__version__ = "1.4.1" +__version_info__ = (1, 4, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '1.4' # The full version, including alpha/beta/rc tags. -release = '1.4.0' +release = '1.4.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,11 +51,11 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.0.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.1.tar.gz - - MD5: 270d09b3c45851df3478bf5fffbad6be + - MD5: ... - - SHA: 589f3a256cfc7826c1e765fea20d67cf63d70dd5 + - SHA: ... * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ `Mailing list `_ """, - version='1.4.0', + version='1.4.1', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h']} if cpython else {}, From pypy.commits at gmail.com Thu Dec 17 10:40:55 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 07:40:55 -0800 (PST) Subject: [pypy-commit] cffi release-1.4: md5/sha1 Message-ID: <5672d787.a658c20a.49ee6.fffff7c4@mx.google.com> Author: Armin Rigo Branch: release-1.4 Changeset: r2489:5d859607e885 Date: 2015-12-17 16:40 +0100 http://bitbucket.org/cffi/cffi/changeset/5d859607e885/ Log: md5/sha1 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.4.1.tar.gz - - MD5: ... + - MD5: 73c2047f598ac7d8b7a5cd8e6d835c42 - - SHA: ... + - SHA: 0a00384281bca841380766b0b41087d105e428a5 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Thu Dec 17 10:47:27 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 07:47:27 -0800 (PST) Subject: [pypy-commit] pypy default: bump cffi version number to 1.4.1 Message-ID: <5672d90f.068e1c0a.12241.fffffbab@mx.google.com> Author: Armin Rigo Branch: Changeset: r81367:38851030d8c4 Date: 2015-12-17 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/38851030d8c4/ Log: bump cffi version number to 1.4.1 diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.0 +Version: 1.4.1 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.0" -__version_info__ = (1, 4, 0) +__version__ = "1.4.1" +__version_info__ = (1, 4, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload, clibffi -VERSION = "1.4.0" +VERSION = "1.4.1" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): From pypy.commits at gmail.com Thu Dec 17 15:13:00 2015 From: pypy.commits at gmail.com (mattip) Date: Thu, 17 Dec 2015 12:13:00 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: add include_dirs kwarg to import_module Message-ID: <5673174c.43c31c0a.d5195.5d3e@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81368:848fb779298b Date: 2015-12-16 22:44 +0200 http://bitbucket.org/pypy/pypy/changeset/848fb779298b/ Log: add include_dirs kwarg to import_module diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -47,7 +47,7 @@ assert 'PyModule_Check' in api.FUNCTIONS assert api.FUNCTIONS['PyModule_Check'].argtypes == [api.PyObject] -def compile_extension_module(space, modname, **kwds): +def compile_extension_module(space, modname, include_dirs=[], **kwds): """ Build an extension module and return the filename of the resulting native code file. @@ -73,11 +73,11 @@ else: kwds["link_files"] = [str(api_library + '.so')] if sys.platform.startswith('linux'): - kwds["compile_extra"]=["-Werror=implicit-function-declaration"] + kwds["compile_extra"]=["-g", "-Werror=implicit-function-declaration"] modname = modname.split('.')[-1] eci = ExternalCompilationInfo( - include_dirs=api.include_dirs, + include_dirs=api.include_dirs + include_dirs, **kwds ) eci = eci.convert_sources_to_files() @@ -91,7 +91,7 @@ soname.rename(pydname) return str(pydname) -def compile_extension_module_applevel(space, modname, **kwds): +def compile_extension_module_applevel(space, modname, include_dirs=[], **kwds): """ Build an extension module and return the filename of the resulting native code file. 
@@ -107,11 +107,11 @@ elif sys.platform == 'darwin': pass elif sys.platform.startswith('linux'): - kwds["compile_extra"]=["-Werror=implicit-function-declaration"] + kwds["compile_extra"]=["-g","-Werror=implicit-function-declaration"] modname = modname.split('.')[-1] eci = ExternalCompilationInfo( - include_dirs = [space.include_dir], + include_dirs = [space.include_dir] + include_dirs, **kwds ) eci = eci.convert_sources_to_files() @@ -279,8 +279,8 @@ @gateway.unwrap_spec(name=str, init='str_or_None', body=str, load_it=bool, filename='str_or_None', PY_SSIZE_T_CLEAN=bool) - def import_module(space, name, init=None, body='', - load_it=True, filename=None, + def import_module(space, name, init=None, body='', load_it=True, + filename=None, include_dirs=[], PY_SSIZE_T_CLEAN=False): """ init specifies the overall template of the module. @@ -317,7 +317,7 @@ filename = py.path.local(pypydir) / 'module' \ / 'cpyext'/ 'test' / (filename + ".c") kwds = dict(separate_module_files=[filename]) - + kwds['include_dirs'] = include_dirs mod = self.compile_extension_module(space, name, **kwds) if load_it: @@ -340,9 +340,11 @@ space.sys.get('modules'), space.wrap(name)) - @gateway.unwrap_spec(modname=str, prologue=str, more_init=str, PY_SSIZE_T_CLEAN=bool) + @gateway.unwrap_spec(modname=str, prologue=str, include_dirs=list, + more_init=str, PY_SSIZE_T_CLEAN=bool) def import_extension(space, modname, w_functions, prologue="", - more_init="", PY_SSIZE_T_CLEAN=False): + include_dirs=[], more_init="", PY_SSIZE_T_CLEAN=False): + include_dirs = [space.unwrap(d) for d in include_dirs] functions = space.unwrap(w_functions) methods_table = [] codes = [] @@ -368,6 +370,7 @@ if more_init: init += more_init return import_module(space, name=modname, init=init, body=body, + include_dirs=include_dirs, PY_SSIZE_T_CLEAN=PY_SSIZE_T_CLEAN) @gateway.unwrap_spec(name=str) @@ -389,7 +392,10 @@ from distutils.sysconfig import get_python_inc class FakeSpace(object): def unwrap(self, args): - return args + try: + return args.str_w(None) + except: + return args fake = FakeSpace() fake.include_dir = get_python_inc() fake.config = self.space.config From pypy.commits at gmail.com Thu Dec 17 15:13:03 2015 From: pypy.commits at gmail.com (mattip) Date: Thu, 17 Dec 2015 12:13:03 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: fix for untranslated tests, start to run -A numpy tests Message-ID: <5673174f.8e371c0a.e9e2b.5e96@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81369:55993b380e68 Date: 2015-12-17 21:54 +0200 http://bitbucket.org/pypy/pypy/changeset/55993b380e68/ Log: fix for untranslated tests, start to run -A numpy tests diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -277,10 +277,10 @@ return space.wrap(pydname) @gateway.unwrap_spec(name=str, init='str_or_None', body=str, - load_it=bool, filename='str_or_None', + load_it=bool, filename='str_or_None', PY_SSIZE_T_CLEAN=bool) def import_module(space, name, init=None, body='', load_it=True, - filename=None, include_dirs=[], + filename=None, w_include_dirs=None, PY_SSIZE_T_CLEAN=False): """ init specifies the overall template of the module. @@ -291,6 +291,10 @@ if filename is None, the module name will be used to construct the filename. 
""" + if w_include_dirs is None: + include_dirs = [] + else: + include_dirs = [space.str_w(s) for s in space.listview(w_include_dirs)] if init is not None: code = """ %(PY_SSIZE_T_CLEAN)s @@ -340,11 +344,10 @@ space.sys.get('modules'), space.wrap(name)) - @gateway.unwrap_spec(modname=str, prologue=str, include_dirs=list, + @gateway.unwrap_spec(modname=str, prologue=str, more_init=str, PY_SSIZE_T_CLEAN=bool) def import_extension(space, modname, w_functions, prologue="", - include_dirs=[], more_init="", PY_SSIZE_T_CLEAN=False): - include_dirs = [space.unwrap(d) for d in include_dirs] + w_include_dirs=None, more_init="", PY_SSIZE_T_CLEAN=False): functions = space.unwrap(w_functions) methods_table = [] codes = [] @@ -370,7 +373,7 @@ if more_init: init += more_init return import_module(space, name=modname, init=init, body=body, - include_dirs=include_dirs, + w_include_dirs=w_include_dirs, PY_SSIZE_T_CLEAN=PY_SSIZE_T_CLEAN) @gateway.unwrap_spec(name=str) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -244,7 +244,7 @@ ''' npy_intp dims[2] ={2, 3}; PyObject * obj = PyArray_SimpleNew(2, dims, 1); - PyArray_FILLWBYTE(obj, 42); + PyArray_FILLWBYTE((PyArrayObject*)obj, 42); return obj; ''' ), @@ -252,12 +252,19 @@ ''' npy_intp dims1[2] ={2, 3}; npy_intp dims2[2] ={3, 2}; + int ok; PyObject * obj1 = PyArray_ZEROS(2, dims1, 11, 0); PyObject * obj2 = PyArray_ZEROS(2, dims2, 11, 0); - PyArray_FILLWBYTE(obj2, 42); - PyArray_CopyInto(obj2, obj1); - Py_DECREF(obj1); - return obj2; + PyArray_FILLWBYTE((PyArrayObject*)obj2, 42); + ok = PyArray_CopyInto((PyArrayObject*)obj2, (PyArrayObject*)obj1); + Py_DECREF(obj2); + if (ok < 0) + { + /* Should have failed */ + Py_DECREF(obj1); + return NULL; + } + return obj1; ''' ), ("test_FromAny", "METH_NOARGS", @@ -286,7 +293,16 @@ return _PyArray_DescrFromType(typenum); """ ), - ], prologue='#include ') + ], include_dirs=self.numpy_include, + prologue=''' + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + ''', + more_init = ''' + #ifndef PYPY_VER + import_array(); + #endif + ''') arr = mod.test_simplenew() assert arr.shape == (2, 3) assert arr.dtype.num == 11 #float32 dtype @@ -302,7 +318,6 @@ dt = mod.test_DescrFromType(11) assert dt.num == 11 - def test_pass_ndarray_object_to_c(self): from _numpypy.multiarray import ndarray mod = self.import_extension('foo', [ @@ -314,7 +329,16 @@ Py_INCREF(obj); return obj; '''), - ], prologue='#include ') + ], include_dirs=self.numpy_include, + prologue=''' + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + ''', + more_init = ''' + #ifndef PYPY_VER + import_array(); + #endif + ''') array = ndarray((3, 4), dtype='d') assert mod.check_array(array) is array raises(TypeError, "mod.check_array(42)") @@ -356,9 +380,13 @@ "a ufunc that tests a more complicated signature", 0, "(m,m)->(m,m)"); """), - ], prologue=''' - #include "numpy/ndarraytypes.h" - /*#include generated by numpy setup.py*/ + ], include_dirs=self.numpy_include, + prologue=''' + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + #ifndef PYPY_VERSION + #include /*generated by numpy setup.py*/ + #endif typedef void (*PyUFuncGenericFunction) (char **args, npy_intp *dimensions, @@ -423,6 +451,10 @@ *((float *)args[1]) = res; }; + ''', more_init = ''' + #ifndef PYPY_VER + import_array(); + #endif ''') sq = arange(18, dtype="float32").reshape(2,3,3) float_ufunc = 
mod.create_float_ufunc_3x3() From pypy.commits at gmail.com Thu Dec 17 15:13:04 2015 From: pypy.commits at gmail.com (mattip) Date: Thu, 17 Dec 2015 12:13:04 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: move PyArray_CopyInto into python Message-ID: <56731750.95151c0a.96895.5e53@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81370:fc642900fdf9 Date: 2015-12-17 21:55 +0200 http://bitbucket.org/pypy/pypy/changeset/fc642900fdf9/ Log: move PyArray_CopyInto into python diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1074,8 +1074,7 @@ import pypy.module.cpyext.ndarrayobject global GLOBALS, SYMBOLS_C, separate_module_files GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") - SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS', - '_PyArray_CopyInto'] + SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS'] separate_module_files.append(source_dir / "ndarrayobject.c") return use_micronumpy diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -197,11 +197,9 @@ PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); -PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); #define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto #define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -170,13 +170,13 @@ return w_array @cpython_api([Py_ssize_t], PyObject) -def _PyArray_DescrFromType(space, typenum): +def PyArray_DescrFromType(space, typenum): try: dtype = get_dtype_cache(space).dtypes_by_num[typenum] return dtype except KeyError: raise OperationError(space.w_ValueError, space.wrap( - '_PyArray_DescrFromType called with invalid dtype %d' % typenum)) + 'PyArray_DescrFromType called with invalid dtype %d' % typenum)) @cpython_api([PyObject, Py_ssize_t, Py_ssize_t, Py_ssize_t], PyObject) def _PyArray_FromObject(space, w_obj, typenum, min_depth, max_depth): @@ -250,6 +250,16 @@ return simple_new(space, nd, dims, typenum, order=order, owning=owning, w_subtype=w_subtype) + at cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) +def PyArray_CopyInto(space, w_dest, w_src): + assert isinstance(w_dest, W_NDimArray) + assert isinstance(w_src, W_NDimArray) + space.appexec([w_dest, w_src], """(dest, src): + dest[:] = src + """ ) + return 0 + + gufunctype = lltype.Ptr(ufuncs.GenericUfunc) # XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there # a problem with casting function pointers? 
diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -16,10 +16,3 @@ return arr; } -int -_PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src) -{ - memcpy(PyArray_DATA(dest), PyArray_DATA(src), PyArray_NBYTES(dest)); - return 0; -} - From pypy.commits at gmail.com Thu Dec 17 15:13:06 2015 From: pypy.commits at gmail.com (mattip) Date: Thu, 17 Dec 2015 12:13:06 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: hack away again till tests run w/ -A Message-ID: <56731752.a415c20a.4d933.6176@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81371:030855655208 Date: 2015-12-17 22:10 +0200 http://bitbucket.org/pypy/pypy/changeset/030855655208/ Log: hack away again till tests run w/ -A diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -394,6 +394,10 @@ def interp2app(func): from distutils.sysconfig import get_python_inc class FakeSpace(object): + def passthrough(self, arg): + return arg + listview = passthrough + str_w = passthrough def unwrap(self, args): try: return args.str_w(None) @@ -403,6 +407,10 @@ fake.include_dir = get_python_inc() fake.config = self.space.config def run(*args, **kwargs): + for k in kwargs.keys(): + if k not in func.unwrap_spec and not k.startswith('w_'): + v = kwargs.pop(k) + kwargs['w_' + k] = v return func(fake, *args, **kwargs) return run def wrap(func): From pypy.commits at gmail.com Thu Dec 17 15:13:08 2015 From: pypy.commits at gmail.com (mattip) Date: Thu, 17 Dec 2015 12:13:08 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: pass some -A ndarray tests Message-ID: <56731754.c6ecc20a.ff6b1.6042@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81372:b6a99ac4c4c2 Date: 2015-12-17 22:10 +0200 http://bitbucket.org/pypy/pypy/changeset/b6a99ac4c4c2/ Log: pass some -A ndarray tests diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -226,11 +226,19 @@ ''' class AppTestNDArray(AppTestCpythonExtensionBase): - if self.runappdirect: - try: - import numpy - except: - skip('numpy not importable') + + def setup_class(cls): + AppTestCpythonExtensionBase.setup_class.im_func(cls) + if cls.runappdirect: + try: + import numpy + cls.w_numpy_include = [numpy.get_include()] + except: + skip('numpy not importable') + else: + cls.w_numpy_include = cls.space.wrap([]) + + def test_ndarray_object_c(self): mod = self.import_extension('foo', [ ("test_simplenew", "METH_NOARGS", @@ -271,8 +279,8 @@ ''' npy_intp dims[2] ={2, 3}; PyObject * obj2, * obj1 = PyArray_SimpleNew(2, dims, 1); - PyArray_FILLWBYTE(obj1, 42); - obj2 = _PyArray_FromAny(obj1, NULL, 0, 0, 0, NULL); + PyArray_FILLWBYTE((PyArrayObject*)obj1, 42); + obj2 = PyArray_FromAny(obj1, NULL, 0, 0, 0, NULL); Py_DECREF(obj1); return obj2; ''' @@ -281,8 +289,8 @@ ''' npy_intp dims[2] ={2, 3}; PyObject * obj2, * obj1 = PyArray_SimpleNew(2, dims, 1); - PyArray_FILLWBYTE(obj1, 42); - obj2 = _PyArray_FromObject(obj1, 12, 0, 0); + PyArray_FILLWBYTE((PyArrayObject*)obj1, 42); + obj2 = PyArray_FromObject(obj1, 12, 0, 0); Py_DECREF(obj1); return obj2; ''' @@ -290,7 +298,7 @@ ("test_DescrFromType", "METH_O", """ Signed typenum = PyInt_AsLong(args); - return _PyArray_DescrFromType(typenum); + 
return PyArray_DescrFromType(typenum); """ ), ], include_dirs=self.numpy_include, @@ -310,8 +318,7 @@ assert arr.shape == (2, 3) assert arr.dtype.num == 1 #int8 dtype assert (arr == 42).all() - arr = mod.test_copy() - assert (arr == 0).all() + raises(ValueError, mod.test_copy) #Make sure these work without errors arr = mod.test_FromAny() arr = mod.test_FromObject() @@ -319,7 +326,10 @@ assert dt.num == 11 def test_pass_ndarray_object_to_c(self): - from _numpypy.multiarray import ndarray + if self.runappdirect: + from numpy import ndarray + else: + from _numpypy.multiarray import ndarray mod = self.import_extension('foo', [ ("check_array", "METH_VARARGS", ''' @@ -344,7 +354,10 @@ raises(TypeError, "mod.check_array(42)") def test_ufunc(self): - from _numpypy.multiarray import arange + if self.runappdirect: + py.test.xfail('why does this segfault on cpython?') + else: + from _numpypy.multiarray import arange mod = self.import_extension('foo', [ ("create_ufunc_basic", "METH_NOARGS", """ From pypy.commits at gmail.com Thu Dec 17 17:09:33 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Dec 2015 14:09:33 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5673329d.02b3c20a.a4bbb.ffff80be@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r679:4418e93fa420 Date: 2015-12-17 23:09 +0100 http://bitbucket.org/pypy/pypy.org/changeset/4418e93fa420/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $61600 of $105000 (58.7%) + $61605 of $105000 (58.7%)
    @@ -23,7 +23,7 @@
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $30219 of $80000 (37.8%) + $30224 of $80000 (37.8%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Fri Dec 18 09:48:40 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 06:48:40 -0800 (PST) Subject: [pypy-commit] pypy default: add merged branch Message-ID: <56741cc8.4d8e1c0a.f6ed6.77c6@mx.google.com> Author: Armin Rigo Branch: Changeset: r81373:365503b19799 Date: 2015-12-18 15:47 +0100 http://bitbucket.org/pypy/pypy/changeset/365503b19799/ Log: add merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -84,3 +84,4 @@ .. branch: test-AF_NETLINK .. branch: small-cleanups-misc +.. branch: cpyext-slotdefs From pypy.commits at gmail.com Fri Dec 18 09:56:18 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 06:56:18 -0800 (PST) Subject: [pypy-commit] pypy default: Need '-lrt' to use clock_gettime. Fix the tests on some random subset of Linuxes Message-ID: <56741e92.82df1c0a.79914.ffff8130@mx.google.com> Author: Armin Rigo Branch: Changeset: r81374:de2631dc2ed7 Date: 2015-12-18 15:55 +0100 http://bitbucket.org/pypy/pypy/changeset/de2631dc2ed7/ Log: Need '-lrt' to use clock_gettime. Fix the tests on some random subset of Linuxes diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py --- a/rpython/rlib/rtime.py +++ b/rpython/rlib/rtime.py @@ -165,9 +165,11 @@ globals().update(rffi_platform.configure(CConfigForClockGetTime)) TIMESPEC = TIMESPEC CLOCK_PROCESS_CPUTIME_ID = CLOCK_PROCESS_CPUTIME_ID + eci_with_lrt = eci.merge(ExternalCompilationInfo(libraries=['rt'])) c_clock_gettime = external('clock_gettime', [lltype.Signed, lltype.Ptr(TIMESPEC)], - rffi.INT, releasegil=False) + rffi.INT, releasegil=False, + compilation_info=eci_with_lrt) else: RUSAGE = RUSAGE RUSAGE_SELF = RUSAGE_SELF or 0 From pypy.commits at gmail.com Fri Dec 18 10:13:40 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 07:13:40 -0800 (PST) Subject: [pypy-commit] pypy default: times() can return a negative value (or even -1) even if there is no Message-ID: <567422a4.cc2f1c0a.6c85e.ffff901a@mx.google.com> Author: Armin Rigo Branch: Changeset: r81375:d38aef1c3eb9 Date: 2015-12-18 16:11 +0100 http://bitbucket.org/pypy/pypy/changeset/d38aef1c3eb9/ Log: times() can return a negative value (or even -1) even if there is no error diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1276,7 +1276,8 @@ if not _WIN32: TMSP = lltype.Ptr(TMS) c_times = external('times', [TMSP], CLOCK_T, - save_err=rffi.RFFI_SAVE_ERRNO) + save_err=rffi.RFFI_SAVE_ERRNO | + rffi.RFFI_ZERO_ERRNO_BEFORE) # Here is a random extra platform parameter which is important. 
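An aside on the cpyext-ext changesets above: the reworked PyArray_CopyInto no longer performs a raw memcpy in C; it delegates to an app-level slice assignment through space.appexec, so micronumpy's own shape checking applies and copying between incompatible shapes now fails, which is what the updated test_copy (raises(ValueError, ...)) expects. The sketch below shows the same idea in ordinary Python; it is only an illustration, the copy_into name is invented here, and it is not code from the changeset.

    import numpy as np

    def copy_into(dest, src):
        # same effect as the new emulation: let the array type perform the
        # copy so shape/broadcasting rules apply, rather than a blind memcpy
        dest[:] = src          # raises ValueError on incompatible shapes
        return 0

    copy_into(np.zeros((2, 3)), np.arange(6).reshape(2, 3))     # fine
    # copy_into(np.zeros((3, 2)), np.arange(6).reshape(2, 3))   # ValueError,
    # mirroring what the updated test_copy now checks for
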
# Strictly speaking, this should probably be retrieved at runtime, not @@ -1298,7 +1299,13 @@ if not _WIN32: l_tmsbuf = lltype.malloc(TMSP.TO, flavor='raw') try: - result = handle_posix_error('times', c_times(l_tmsbuf)) + # note: times() can return a negative value (or even -1) + # even if there is no error + result = widen(c_times(l_tmsbuf)) + if result == -1: + errno = get_saved_errno() + if errno != 0: + raise OSError(errno, 'times() failed') return ( rffi.cast(lltype.Signed, l_tmsbuf.c_tms_utime) / CLOCK_TICKS_PER_SECOND, From pypy.commits at gmail.com Fri Dec 18 10:35:33 2015 From: pypy.commits at gmail.com (rlamy) Date: Fri, 18 Dec 2015 07:35:33 -0800 (PST) Subject: [pypy-commit] pypy fix-2198: hg merge default Message-ID: <567427c5.043fc20a.bab5f.ffff95f1@mx.google.com> Author: Ronan Lamy Branch: fix-2198 Changeset: r81376:86b5972fa57a Date: 2015-12-18 16:33 +0100 http://bitbucket.org/pypy/pypy/changeset/86b5972fa57a/ Log: hg merge default diff too long, truncating to 2000 out of 2340 lines diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.0 +Version: 1.4.1 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.0" -__version_info__ = (1, 4, 0) +__version__ = "1.4.1" +__version_info__ = (1, 4, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -83,30 +83,27 @@ **pypy-stm requires 64-bit Linux for now.** -Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu binary here__ -(``pypy-stm-2.*.tar.bz2``, for Ubuntu 12.04-14.04). The current version -supports four "segments", which means that it will run up to four -threads in parallel. (Development recently switched to `stmgc-c8`_, -but that is not ready for trying out yet.) +Development is done in the branch `stmgc-c8`_. If you are only +interested in trying it out, please pester us until we upload a recent +prebuilt binary. The current version supports four "segments", which +means that it will run up to four threads in parallel. To build a version from sources, you first need to compile a custom -version of clang(!); we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 `` -for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for a clang-only feature that hasn't been used so heavily -in the past (without the patches, you get crashes of clang). Then get -the branch `stmgc-c7`_ of PyPy and run:: +version of gcc(!). See the instructions here: +https://bitbucket.org/pypy/stmgc/src/default/gcc-seg-gs/ +(Note that these patches are being incorporated into gcc. It is likely +that future versions of gcc will not need to be patched any more.) + +Then get the branch `stmgc-c8`_ of PyPy and run:: cd pypy/goal ../../rpython/bin/rpython -Ojit --stm - PYTHONPATH=. 
./pypy-c pypy/tool/build_cffi_imports.py -.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ +At the end, this will try to compile the generated C code by calling +``gcc-seg-gs``, which must be the script you installed in the +instructions above. + .. _`stmgc-c8`: https://bitbucket.org/pypy/pypy/src/stmgc-c8/ -.. __: https://bitbucket.org/pypy/pypy/downloads/ -.. __: http://clang.llvm.org/get_started.html -.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ .. _caveats: @@ -114,6 +111,12 @@ Current status (stmgc-c7) ------------------------- +.. warning:: + + THIS PAGE IS OLD, THE REST IS ABOUT STMGC-C7 WHEREAS THE CURRENT + DEVELOPMENT WORK IS DONE ON STMGC-C8 + + * **NEW:** It seems to work fine, without crashing any more. Please `report any crash`_ you find (or other bugs). diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -81,3 +81,15 @@ Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and turn them into regular RPython functions. Most RPython-compatible `os.*` functions are now directly accessible as `rpython.rposix.*`. + +.. branch: always-enable-gil + +Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. + +.. branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + +.. branch: test-AF_NETLINK +.. branch: small-cleanups-misc +.. branch: cpyext-slotdefs diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload, clibffi -VERSION = "1.4.0" +VERSION = "1.4.1" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py --- a/pypy/module/_cffi_backend/call_python.py +++ b/pypy/module/_cffi_backend/call_python.py @@ -40,10 +40,9 @@ at least 8 bytes in size. 
""" from pypy.module._cffi_backend.ccallback import reveal_callback + from rpython.rlib import rgil - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -71,9 +70,7 @@ cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) rffi.stackcounter.stacks_counter -= 1 - before = rffi.aroundstate.before - if before: - before() + rgil.release() def get_ll_cffi_call_python(): diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.1", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -251,7 +251,7 @@ from pypy.module._socket.interp_socket import addr_as_object if not hasattr(rsocket._c, 'sockaddr_ll'): py.test.skip("posix specific test") - # HACK: To get the correct interface numer of lo, which in most cases is 1, + # HACK: To get the correct interface number of lo, which in most cases is 1, # but can be anything (i.e. 39), we need to call the libc function # if_nametoindex to get the correct index import ctypes @@ -513,7 +513,7 @@ def test_getsetsockopt(self): import _socket as socket import struct - # A socket sould start with reuse == 0 + # A socket should start with reuse == 0 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse == 0 @@ -627,6 +627,26 @@ self.foo = _socket.socket() +class AppTestNetlink: + def setup_class(cls): + if not hasattr(os, 'getpid'): + py.test.skip("AF_NETLINK needs os.getpid()") + w_ok = space.appexec([], "(): import _socket; " + + "return hasattr(_socket, 'AF_NETLINK')") + if not space.is_true(w_ok): + py.test.skip("no AF_NETLINK on this platform") + cls.space = space + + def test_connect_to_kernel_netlink_routing_socket(self): + import _socket, os + s = _socket.socket(_socket.AF_NETLINK, _socket.SOCK_DGRAM, _socket.NETLINK_ROUTE) + assert s.getsockname() == (0L, 0L) + s.bind((0, 0)) + a, b = s.getsockname() + assert a == os.getpid() + assert b == 0 + + class AppTestPacket: def setup_class(cls): if not hasattr(os, 'getuid') or os.getuid() != 0: diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -124,7 +124,7 @@ METH_COEXIST METH_STATIC METH_CLASS METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) @@ -602,6 +602,7 @@ # Make the wrapper for the cases (1) and (2) def make_wrapper(space, callable, gil=None): "NOT_RPYTHON" + from rpython.rlib import rgil names = callable.api_func.argnames argtypes_enum_ui = unrolling_iterable(enumerate(zip(callable.api_func.argtypes, [name.startswith("w_") for 
name in names]))) @@ -617,9 +618,7 @@ # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer if gil_acquire: - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -692,9 +691,7 @@ pypy_debug_catch_fatal_exception() rffi.stackcounter.stacks_counter -= 1 if gil_release: - before = rffi.aroundstate.before - if before: - before() + rgil.release() return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,14 +4,14 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, readbufferproc) -from pypy.module.cpyext.pyobject import from_ref +from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt @@ -65,22 +65,24 @@ func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, w_self, args_w[0]) def wrap_binaryfunc_r(space, w_self, w_args, func): func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, args_w[0], w_self) def wrap_inquirypred(space, w_self, w_args, func): diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -591,45 +591,92 @@ def test_binaryfunc(self): module = self.import_extension('foo', [ - ("new_obj", "METH_NOARGS", + ("newInt", "METH_VARARGS", """ - FooObject *fooObj; + IntLikeObject *intObj; + long intval; - Foo_Type.tp_as_number = &foo_as_number; - foo_as_number.nb_add = foo_nb_add_call; - if (PyType_Ready(&Foo_Type) < 0) return NULL; - fooObj = PyObject_New(FooObject, &Foo_Type); - if (!fooObj) { + if (!PyArg_ParseTuple(args, "i", &intval)) + return NULL; + + IntLike_Type.tp_as_number = &intlike_as_number; + IntLike_Type.tp_flags |= Py_TPFLAGS_CHECKTYPES; + 
intlike_as_number.nb_add = intlike_nb_add; + if (PyType_Ready(&IntLike_Type) < 0) return NULL; + intObj = PyObject_New(IntLikeObject, &IntLike_Type); + if (!intObj) { return NULL; } - return (PyObject *)fooObj; + intObj->ival = intval; + return (PyObject *)intObj; + """), + ("newIntNoOp", "METH_VARARGS", + """ + IntLikeObjectNoOp *intObjNoOp; + long intval; + + if (!PyArg_ParseTuple(args, "i", &intval)) + return NULL; + + IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; + if (PyType_Ready(&IntLike_Type_NoOp) < 0) return NULL; + intObjNoOp = PyObject_New(IntLikeObjectNoOp, &IntLike_Type_NoOp); + if (!intObjNoOp) { + return NULL; + } + + intObjNoOp->ival = intval; + return (PyObject *)intObjNoOp; """)], """ typedef struct { PyObject_HEAD - } FooObject; + long ival; + } IntLikeObject; static PyObject * - foo_nb_add_call(PyObject *self, PyObject *other) + intlike_nb_add(PyObject *self, PyObject *other) { - return PyInt_FromLong(42); + long val1 = ((IntLikeObject *)(self))->ival; + if (PyInt_Check(other)) { + long val2 = PyInt_AsLong(other); + return PyInt_FromLong(val1+val2); + } + + long val2 = ((IntLikeObject *)(other))->ival; + return PyInt_FromLong(val1+val2); } - PyTypeObject Foo_Type = { + PyTypeObject IntLike_Type = { PyObject_HEAD_INIT(0) /*ob_size*/ 0, - /*tp_name*/ "Foo", - /*tp_basicsize*/ sizeof(FooObject), + /*tp_name*/ "IntLike", + /*tp_basicsize*/ sizeof(IntLikeObject), }; - static PyNumberMethods foo_as_number; + static PyNumberMethods intlike_as_number; + + typedef struct + { + PyObject_HEAD + long ival; + } IntLikeObjectNoOp; + + PyTypeObject IntLike_Type_NoOp = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "IntLikeNoOp", + /*tp_basicsize*/ sizeof(IntLikeObjectNoOp), + }; """) - a = module.new_obj() - b = module.new_obj() + a = module.newInt(1) + b = module.newInt(2) c = 3 - assert (a + b) == 42 - raises(TypeError, "b + c") + d = module.newIntNoOp(4) + assert (a + b) == 3 + assert (b + c) == 5 + assert (d + a) == 5 def test_tp_new_in_subclass_of_type(self): skip("BROKEN") diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py --- a/pypy/module/signal/__init__.py +++ b/pypy/module/signal/__init__.py @@ -48,3 +48,6 @@ use_bytecode_counter=False) space.actionflag.__class__ = interp_signal.SignalActionFlag # xxx yes I know the previous line is a hack + + def startup(self, space): + space.check_signal_action.startup(space) diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -63,19 +63,25 @@ AsyncAction.__init__(self, space) self.pending_signal = -1 self.fire_in_another_thread = False - if self.space.config.objspace.usemodules.thread: - from pypy.module.thread import gil - gil.after_thread_switch = self._after_thread_switch + # + @rgc.no_collect + def _after_thread_switch(): + if self.fire_in_another_thread: + if self.space.threadlocals.signals_enabled(): + self.fire_in_another_thread = False + self.space.actionflag.rearm_ticker() + # this occurs when we just switched to the main thread + # and there is a signal pending: we force the ticker to + # -1, which should ensure perform() is called quickly. 
+ self._after_thread_switch = _after_thread_switch + # ^^^ so that 'self._after_thread_switch' can be annotated as a + # constant - @rgc.no_collect - def _after_thread_switch(self): - if self.fire_in_another_thread: - if self.space.threadlocals.signals_enabled(): - self.fire_in_another_thread = False - self.space.actionflag.rearm_ticker() - # this occurs when we just switched to the main thread - # and there is a signal pending: we force the ticker to - # -1, which should ensure perform() is called quickly. + def startup(self, space): + # this is translated + if space.config.objspace.usemodules.thread: + from rpython.rlib import rgil + rgil.invoke_after_thread_switch(self._after_thread_switch) def perform(self, executioncontext, frame): self._poll_for_signals() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 4): - py.test.skip("re-enable me in version 1.4") + if __version_info__ < (1, 5): + py.test.skip("re-enable me in version 1.5") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py --- a/pypy/module/thread/gil.py +++ b/pypy/module/thread/gil.py @@ -11,7 +11,6 @@ from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import PeriodicAsyncAction from pypy.module.thread.threadlocals import OSThreadLocals -from rpython.rlib.objectmodel import invoke_around_extcall class GILThreadLocals(OSThreadLocals): """A version of OSThreadLocals that enforces a GIL.""" @@ -23,34 +22,21 @@ space.actionflag.register_periodic_action(GILReleaseAction(space), use_bytecode_counter=True) - def _initialize_gil(self, space): - rgil.gil_allocate() - def setup_threads(self, space): """Enable threads in the object space, if they haven't already been.""" if not self.gil_ready: - self._initialize_gil(space) + # Note: this is a quasi-immutable read by module/pypyjit/interp_jit + # It must be changed (to True) only if it was really False before + rgil.allocate() self.gil_ready = True result = True else: result = False # already set up - - # add the GIL-releasing callback around external function calls. - # - # XXX we assume a single space, but this is not quite true during - # testing; for example, if you run the whole of test_lock you get - # a deadlock caused by the first test's space being reused by - # test_lock_again after the global state was cleared by - # test_compile_lock. As a workaround, we repatch these global - # fields systematically. 
- invoke_around_extcall(before_external_call, after_external_call) return result - def reinit_threads(self, space): - "Called in the child process after a fork()" - OSThreadLocals.reinit_threads(self, space) - if self.gil_ready: # re-initialize the gil if needed - self._initialize_gil(space) + ## def reinit_threads(self, space): + ## "Called in the child process after a fork()" + ## OSThreadLocals.reinit_threads(self, space) class GILReleaseAction(PeriodicAsyncAction): @@ -59,43 +45,4 @@ """ def perform(self, executioncontext, frame): - do_yield_thread() - - -after_thread_switch = lambda: None # hook for signal.py - -def before_external_call(): - # this function must not raise, in such a way that the exception - # transformer knows that it cannot raise! - rgil.gil_release() -before_external_call._gctransformer_hint_cannot_collect_ = True -before_external_call._dont_reach_me_in_del_ = True - -def after_external_call(): - rgil.gil_acquire() - rthread.gc_thread_run() - after_thread_switch() -after_external_call._gctransformer_hint_cannot_collect_ = True -after_external_call._dont_reach_me_in_del_ = True - -# The _gctransformer_hint_cannot_collect_ hack is needed for -# translations in which the *_external_call() functions are not inlined. -# They tell the gctransformer not to save and restore the local GC -# pointers in the shadow stack. This is necessary because the GIL is -# not held after the call to before_external_call() or before the call -# to after_external_call(). - -def do_yield_thread(): - # explicitly release the gil, in a way that tries to give more - # priority to other threads (as opposed to continuing to run in - # the same thread). - if rgil.gil_yield_thread(): - rthread.gc_thread_run() - after_thread_switch() -do_yield_thread._gctransformer_hint_close_stack_ = True -do_yield_thread._dont_reach_me_in_del_ = True -do_yield_thread._dont_inline_ = True - -# do_yield_thread() needs a different hint: _gctransformer_hint_close_stack_. -# The *_external_call() functions are themselves called only from the rffi -# module from a helper function that also has this hint. 
+ rgil.yield_thread() diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -5,7 +5,7 @@ import errno from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.module.thread import gil +from rpython.rlib import rgil NORMAL_TIMEOUT = 300.0 # 5 minutes @@ -15,9 +15,9 @@ adaptivedelay = 0.04 limit = time.time() + delay * NORMAL_TIMEOUT while time.time() <= limit: - gil.before_external_call() + rgil.release() time.sleep(adaptivedelay) - gil.after_external_call() + rgil.acquire() gc.collect() if space.is_true(space.call_function(w_condition)): return diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -1,5 +1,6 @@ import time from pypy.module.thread import gil +from rpython.rlib import rgil from rpython.rlib.test import test_rthread from rpython.rlib import rthread as thread from rpython.rlib.objectmodel import we_are_translated @@ -55,7 +56,7 @@ assert state.datalen3 == len(state.data) assert state.datalen4 == len(state.data) debug_print(main, i, state.datalen4) - gil.do_yield_thread() + rgil.yield_thread() assert i == j j += 1 def bootstrap(): @@ -82,9 +83,9 @@ if not still_waiting: raise ValueError("time out") still_waiting -= 1 - if not we_are_translated(): gil.before_external_call() + if not we_are_translated(): rgil.release() time.sleep(0.01) - if not we_are_translated(): gil.after_external_call() + if not we_are_translated(): rgil.acquire() debug_print("leaving!") i1 = i2 = 0 for tid, i in state.data: diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -521,7 +521,6 @@ def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): - # XXX consider to extend rlist's functionality? length = self.length() start, stop, step, slicelength = w_index.indices4(space, length) assert slicelength >= 0 diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -441,7 +441,7 @@ def dict_contains(s_dct, s_element, position): s_dct.dictdef.generalize_key(s_element) if s_dct._is_empty(position): - s_bool =SomeBool() + s_bool = SomeBool() s_bool.const = False return s_bool return s_Bool diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -1,5 +1,5 @@ """ -This module defines all the SpaceOeprations used in rpython.flowspace. +This module defines all the SpaceOperations used in rpython.flowspace. 
""" import __builtin__ @@ -196,21 +196,6 @@ return cls._dispatch(type(s_arg)) @classmethod - def get_specialization(cls, s_arg, *_ignored): - try: - impl = getattr(s_arg, cls.opname) - - def specialized(annotator, arg, *other_args): - return impl(*[annotator.annotation(x) for x in other_args]) - try: - specialized.can_only_throw = impl.can_only_throw - except AttributeError: - pass - return specialized - except AttributeError: - return cls._dispatch(type(s_arg)) - - @classmethod def register_transform(cls, Some_cls): def decorator(func): cls._transform[Some_cls] = func diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -380,6 +380,8 @@ # the call that it is no longer equal to css. See description # in translator/c/src/thread_pthread.c. + # XXX some duplicated logic here, but note that rgil.acquire() + # does more than just RPyGilAcquire() if old_rpy_fastgil == 0: # this case occurs if some other thread stole the GIL but # released it again. What occurred here is that we changed @@ -390,9 +392,8 @@ elif old_rpy_fastgil == 1: # 'rpy_fastgil' was (and still is) locked by someone else. # We need to wait for the regular mutex. - after = rffi.aroundstate.after - if after: - after() + from rpython.rlib import rgil + rgil.acquire() else: # stole the GIL from a different thread that is also # currently in an external call from the jit. Attach @@ -421,9 +422,8 @@ # 'rpy_fastgil' contains only zero or non-zero, and this is only # called when the old value stored in 'rpy_fastgil' was non-zero # (i.e. still locked, must wait with the regular mutex) - after = rffi.aroundstate.after - if after: - after() + from rpython.rlib import rgil + rgil.acquire() _REACQGIL0_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) _REACQGIL2_FUNC = lltype.Ptr(lltype.FuncType([rffi.CCHARP, lltype.Signed], diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -17,7 +17,6 @@ from rpython.jit.backend.llsupport.test.test_regalloc_integration import BaseTestRegalloc from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.codewriter import longlong -from rpython.rlib.objectmodel import invoke_around_extcall CPU = getcpuclass() @@ -625,9 +624,6 @@ self.S = S self.cpu = cpu - def teardown_method(self, meth): - rffi.aroundstate._cleanup_() - def test_shadowstack_call(self): cpu = self.cpu cpu.gc_ll_descr.init_nursery(100) diff --git a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py @@ -1,6 +1,5 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.jit import dont_look_inside -from rpython.rlib.objectmodel import invoke_around_extcall from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib import rposix @@ -16,20 +15,10 @@ compile_kwds = dict(enable_opts=ALL_OPTS_NAMES, thread=True) def define_simple(self): - class Glob: - def __init__(self): - self.event = 0 - glob = Glob() - # - c_strchr = rffi.llexternal('strchr', [rffi.CCHARP, 
lltype.Signed], rffi.CCHARP) - def func(): - glob.event += 1 - def before(n, x): - invoke_around_extcall(func, func) return (n, None, None, None, None, None, None, None, None, None, None, None) # @@ -73,7 +62,8 @@ def f42(n): length = len(glob.lst) raw = alloc1() - fn = llhelper(CALLBACK, rffi._make_wrapper_for(CALLBACK, callback)) + wrapper = rffi._make_wrapper_for(CALLBACK, callback, None, True) + fn = llhelper(CALLBACK, wrapper) if n & 1: # to create a loop and a bridge, and also pass # to run the qsort() call in the blackhole interp c_qsort(rffi.cast(rffi.VOIDP, raw), rffi.cast(rffi.SIZE_T, 2), diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1170,10 +1170,11 @@ 'GC_LOAD/3/rfi', # parameters GC_LOAD_INDEXED # 1: pointer to complex object - # 2: integer describing the offset + # 2: integer describing the index # 3: constant integer scale factor - # 4: constant integer offset + # 4: constant integer base offset (final offset is 'base + scale * index') # 5: constant integer. byte size of datatype to load (negative if it is signed) + # (GC_LOAD is equivalent to GC_LOAD_INDEXED with arg3==1, arg4==0) 'GC_LOAD_INDEXED/5/rfi', '_RAW_LOAD_FIRST', @@ -1204,8 +1205,9 @@ # same paramters as GC_LOAD, but one additional for the value to store # note that the itemsize is not signed! + # (gcptr, index, value, [scale, base_offset,] itemsize) 'GC_STORE/4d/n', - 'GC_STORE_INDEXED/5d/n', + 'GC_STORE_INDEXED/6d/n', 'INCREMENT_DEBUG_COUNTER/1/n', '_RAW_STORE_FIRST', diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4044,7 +4044,7 @@ self.interp_operations(f, []) def test_external_call(self): - from rpython.rlib.objectmodel import invoke_around_extcall + from rpython.rlib import rgil TIME_T = lltype.Signed # ^^^ some 32-bit platforms have a 64-bit rffi.TIME_T, but we @@ -4058,11 +4058,6 @@ pass state = State() - def before(): - if we_are_jitted(): - raise Oups - state.l.append("before") - def after(): if we_are_jitted(): raise Oups @@ -4070,14 +4065,14 @@ def f(): state.l = [] - invoke_around_extcall(before, after) + rgil.invoke_after_thread_switch(after) external(lltype.nullptr(T.TO)) return len(state.l) res = self.interp_operations(f, []) - assert res == 2 + assert res == 1 res = self.interp_operations(f, []) - assert res == 2 + assert res == 1 self.check_operations_history(call_release_gil_i=1, call_may_force_i=0) def test_unescaped_write_zero(self): diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1124,8 +1124,8 @@ resultvar=op.result) def gct_gc_thread_run(self, hop): - assert self.translator.config.translation.thread - if hasattr(self.root_walker, 'thread_run_ptr'): + if (self.translator.config.translation.thread and + hasattr(self.root_walker, 'thread_run_ptr')): livevars = self.push_roots(hop) assert not livevars, "live GC var around %s!" 
% (hop.spaceop,) hop.genop("direct_call", [self.root_walker.thread_run_ptr]) diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py --- a/rpython/memory/gcwrapper.py +++ b/rpython/memory/gcwrapper.py @@ -184,6 +184,9 @@ hdr.tid |= self.gc.gcflag_extra return (hdr.tid & self.gc.gcflag_extra) != 0 + def thread_run(self): + pass + # ____________________________________________________________ class LLInterpRootWalker: diff --git a/rpython/rlib/_rposix_repr.py b/rpython/rlib/_rposix_repr.py deleted file mode 100644 --- a/rpython/rlib/_rposix_repr.py +++ /dev/null @@ -1,122 +0,0 @@ -""" -RTyping support for os.stat_result objects. -They are rtyped just like a tuple of the correct length supporting -only indexing and the st_xxx attributes. We need a custom StatResultRepr -because when rtyping for LL backends we have extra platform-dependent -items at the end of the tuple, but for OO backends we only want the -portable items. This allows the OO backends to assume a fixed shape for -the tuples returned by os.stat(). -""" -from rpython.annotator import model as annmodel -from rpython.rtyper.llannotation import lltype_to_annotation -from rpython.flowspace.model import Constant -from rpython.flowspace.operation import op -from rpython.tool.pairtype import pairtype -from rpython.rtyper.rmodel import Repr -from rpython.rtyper.rint import IntegerRepr -from rpython.rtyper.error import TyperError -from rpython.rlib import rposix_stat - - -class StatResultRepr(Repr): - - def __init__(self, rtyper): - self.rtyper = rtyper - self.stat_fields = rposix_stat.STAT_FIELDS - - self.stat_field_indexes = {} - for i, (name, TYPE) in enumerate(self.stat_fields): - self.stat_field_indexes[name] = i - - self.s_tuple = annmodel.SomeTuple([lltype_to_annotation(TYPE) - for name, TYPE in self.stat_fields]) - self.r_tuple = rtyper.getrepr(self.s_tuple) - self.lowleveltype = self.r_tuple.lowleveltype - - def redispatch_getfield(self, hop, index): - rtyper = self.rtyper - s_index = rtyper.annotator.bookkeeper.immutablevalue(index) - hop2 = hop.copy() - spaceop = op.getitem(hop.args_v[0], Constant(index)) - spaceop.result = hop.spaceop.result - hop2.spaceop = spaceop - hop2.args_v = spaceop.args - hop2.args_s = [self.s_tuple, s_index] - hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] - return hop2.dispatch() - - def rtype_getattr(self, hop): - s_attr = hop.args_s[1] - attr = s_attr.const - try: - index = self.stat_field_indexes[attr] - except KeyError: - raise TyperError("os.stat().%s: field not available" % (attr,)) - return self.redispatch_getfield(hop, index) - - -class __extend__(pairtype(StatResultRepr, IntegerRepr)): - - def rtype_getitem((r_sta, r_int), hop): - s_int = hop.args_s[1] - index = s_int.const - return r_sta.redispatch_getfield(hop, index) - - -def specialize_make_stat_result(hop): - r_StatResult = hop.rtyper.getrepr(rposix_stat.s_StatResult) - [v_result] = hop.inputargs(r_StatResult.r_tuple) - # no-op conversion from r_StatResult.r_tuple to r_StatResult - hop.exception_cannot_occur() - return v_result - - -class StatvfsResultRepr(Repr): - - def __init__(self, rtyper): - self.rtyper = rtyper - self.statvfs_fields = rposix_stat.STATVFS_FIELDS - - self.statvfs_field_indexes = {} - for i, (name, TYPE) in enumerate(self.statvfs_fields): - self.statvfs_field_indexes[name] = i - - self.s_tuple = annmodel.SomeTuple([lltype_to_annotation(TYPE) - for name, TYPE in self.statvfs_fields]) - self.r_tuple = rtyper.getrepr(self.s_tuple) - self.lowleveltype = self.r_tuple.lowleveltype - - def 
redispatch_getfield(self, hop, index): - rtyper = self.rtyper - s_index = rtyper.annotator.bookkeeper.immutablevalue(index) - hop2 = hop.copy() - spaceop = op.getitem(hop.args_v[0], Constant(index)) - spaceop.result = hop.spaceop.result - hop2.spaceop = spaceop - hop2.args_v = spaceop.args - hop2.args_s = [self.s_tuple, s_index] - hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] - return hop2.dispatch() - - def rtype_getattr(self, hop): - s_attr = hop.args_s[1] - attr = s_attr.const - try: - index = self.statvfs_field_indexes[attr] - except KeyError: - raise TyperError("os.statvfs().%s: field not available" % (attr,)) - return self.redispatch_getfield(hop, index) - - -class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)): - def rtype_getitem((r_sta, r_int), hop): - s_int = hop.args_s[1] - index = s_int.const - return r_sta.redispatch_getfield(hop, index) - - -def specialize_make_statvfs_result(hop): - r_StatvfsResult = hop.rtyper.getrepr(rposix_stat.s_StatvfsResult) - [v_result] = hop.inputargs(r_StatvfsResult.r_tuple) - hop.exception_cannot_occur() - return v_result diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -56,10 +56,11 @@ """ def deco(func): source = py.code.Source(""" + from rpython.rlib import rgil + def wrapper(%(args)s): # acquire the GIL - after = rffi.aroundstate.after - if after: after() + rgil.acquire() # rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -78,8 +79,7 @@ assert 0 # dead code rffi.stackcounter.stacks_counter -= 1 # release the GIL - before = rffi.aroundstate.before - if before: before() + rgil.release() # return res """ % {'args': ', '.join(['arg%d' % i for i in range(len(argtypes))])}) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -599,22 +599,10 @@ def hlinvoke(repr, llcallable, *args): raise TypeError("hlinvoke is meant to be rtyped and not called direclty") -def invoke_around_extcall(before, after): - """Call before() before any external function call, and after() after. - At the moment only one pair before()/after() can be registered at a time. +def is_in_callback(): + """Returns True if we're currently in a callback *or* if there are + multiple threads around. """ - # NOTE: the hooks are cleared during translation! To be effective - # in a compiled program they must be set at run-time. - from rpython.rtyper.lltypesystem import rffi - rffi.aroundstate.before = before - rffi.aroundstate.after = after - # the 'aroundstate' contains regular function and not ll pointers to them, - # but let's call llhelper() anyway to force their annotation - from rpython.rtyper.annlowlevel import llhelper - llhelper(rffi.AroundFnPtr, before) - llhelper(rffi.AroundFnPtr, after) - -def is_in_callback(): from rpython.rtyper.lltypesystem import rffi return rffi.stackcounter.stacks_counter > 1 diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -186,7 +186,13 @@ """ if not obj: return False - return can_move(obj) + # XXX returning can_move() here might acidentally work for the use + # cases (see issue #2212), but this is not really safe. Now we + # just return True for any non-NULL pointer, and too bad for the + # few extra 'cond_call_gc_wb'. It could be improved e.g. to return + # False if 'obj' is a static prebuilt constant, or if we're not + # running incminimark... 
+ return True #can_move(obj) def _heap_stats(): raise NotImplementedError # can't be run directly diff --git a/rpython/rlib/rgil.py b/rpython/rlib/rgil.py --- a/rpython/rlib/rgil.py +++ b/rpython/rlib/rgil.py @@ -2,6 +2,7 @@ from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.extregistry import ExtRegistryEntry # these functions manipulate directly the GIL, whose definition does not # escape the C code itself @@ -10,27 +11,135 @@ eci = ExternalCompilationInfo( includes = ['src/thread.h'], separate_module_files = [translator_c_dir / 'src' / 'thread.c'], - include_dirs = [translator_c_dir]) + include_dirs = [translator_c_dir], + post_include_bits = ['#define RPY_WITH_GIL']) llexternal = rffi.llexternal -gil_allocate = llexternal('RPyGilAllocate', [], lltype.Void, +_gil_allocate = llexternal('RPyGilAllocate', [], lltype.Void, + _nowrapper=True, sandboxsafe=True, + compilation_info=eci) + +_gil_yield_thread = llexternal('RPyGilYieldThread', [], lltype.Signed, _nowrapper=True, sandboxsafe=True, compilation_info=eci) -gil_yield_thread = llexternal('RPyGilYieldThread', [], lltype.Signed, +_gil_release = llexternal('RPyGilRelease', [], lltype.Void, _nowrapper=True, sandboxsafe=True, compilation_info=eci) -gil_release = llexternal('RPyGilRelease', [], lltype.Void, - _nowrapper=True, sandboxsafe=True, - compilation_info=eci) - -gil_acquire = llexternal('RPyGilAcquire', [], lltype.Void, +_gil_acquire = llexternal('RPyGilAcquire', [], lltype.Void, _nowrapper=True, sandboxsafe=True, compilation_info=eci) gil_fetch_fastgil = llexternal('RPyFetchFastGil', [], llmemory.Address, _nowrapper=True, sandboxsafe=True, compilation_info=eci) + +# ____________________________________________________________ + + +def invoke_after_thread_switch(callback): + """Invoke callback() after a thread switch. + + This is a hook used by pypy.module.signal. Several callbacks should + be easy to support (but not right now). + + This function should be called from the translated RPython program + (i.e. *not* at module level!), but registers the callback + statically. The exact point at which invoke_after_thread_switch() + is called has no importance: the callback() will be called anyway. + """ + print "NOTE: invoke_after_thread_switch() is meant to be translated " + print "and not called directly. Using some emulation." 
+ global _emulated_after_thread_switch + _emulated_after_thread_switch = callback + +_emulated_after_thread_switch = None + +def _after_thread_switch(): + """NOT_RPYTHON""" + if _emulated_after_thread_switch is not None: + _emulated_after_thread_switch() + + +class Entry(ExtRegistryEntry): + _about_ = invoke_after_thread_switch + + def compute_result_annotation(self, s_callback): + assert s_callback.is_constant() + callback = s_callback.const + bk = self.bookkeeper + translator = bk.annotator.translator + if hasattr(translator, '_rgil_invoke_after_thread_switch'): + assert translator._rgil_invoke_after_thread_switch == callback, ( + "not implemented yet: several invoke_after_thread_switch()") + else: + translator._rgil_invoke_after_thread_switch = callback + bk.emulate_pbc_call("rgil.invoke_after_thread_switch", s_callback, []) + + def specialize_call(self, hop): + # the actual call is not done here + hop.exception_cannot_occur() + +class Entry(ExtRegistryEntry): + _about_ = _after_thread_switch + + def compute_result_annotation(self): + # the call has been emulated already in invoke_after_thread_switch() + pass + + def specialize_call(self, hop): + translator = hop.rtyper.annotator.translator + if hasattr(translator, '_rgil_invoke_after_thread_switch'): + func = translator._rgil_invoke_after_thread_switch + graph = translator._graphof(func) + llfn = hop.rtyper.getcallable(graph) + c_callback = hop.inputconst(lltype.typeOf(llfn), llfn) + hop.exception_is_here() + hop.genop("direct_call", [c_callback]) + else: + hop.exception_cannot_occur() + + +def allocate(): + _gil_allocate() + +def release(): + # this function must not raise, in such a way that the exception + # transformer knows that it cannot raise! + _gil_release() +release._gctransformer_hint_cannot_collect_ = True +release._dont_reach_me_in_del_ = True + +def acquire(): + from rpython.rlib import rthread + _gil_acquire() + rthread.gc_thread_run() + _after_thread_switch() +acquire._gctransformer_hint_cannot_collect_ = True +acquire._dont_reach_me_in_del_ = True + +# The _gctransformer_hint_cannot_collect_ hack is needed for +# translations in which the *_external_call() functions are not inlined. +# They tell the gctransformer not to save and restore the local GC +# pointers in the shadow stack. This is necessary because the GIL is +# not held after the call to gil.release() or before the call +# to gil.acquire(). + +def yield_thread(): + # explicitly release the gil, in a way that tries to give more + # priority to other threads (as opposed to continuing to run in + # the same thread). + if _gil_yield_thread(): + from rpython.rlib import rthread + rthread.gc_thread_run() + _after_thread_switch() +yield_thread._gctransformer_hint_close_stack_ = True +yield_thread._dont_reach_me_in_del_ = True +yield_thread._dont_inline_ = True + +# yield_thread() needs a different hint: _gctransformer_hint_close_stack_. +# The *_external_call() functions are themselves called only from the rffi +# module from a helper function that also has this hint. 
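A minimal usage sketch (not part of the changeset above, and untested): the new rpython.rlib.rgil module exposes explicit acquire()/release()/yield_thread() helpers plus a statically registered invoke_after_thread_switch() hook, replacing the old invoke_around_extcall()/aroundstate pattern removed elsewhere in this commit. The names _on_thread_switch, setup_threads, call_blocking and c_func below are made up for illustration only; in PyPy itself the callback is registered by pypy.module.signal and the release/acquire pairing is emitted automatically by rffi's generated wrappers.

    from rpython.rlib import rgil

    def _on_thread_switch():
        # hypothetical callback; in PyPy this hook is used by
        # pypy.module.signal to re-check for pending signals
        pass

    def setup_threads():
        # must be called from translated RPython code, not at module
        # level; the callback itself is registered statically
        rgil.invoke_after_thread_switch(_on_thread_switch)

    def call_blocking(c_func):
        # 'c_func' stands for some external C call; this only models
        # the wrapper that rffi generates around such calls
        rgil.release()      # give up the GIL around the external call
        res = c_func()      # must not touch GC objects nor raise here
        rgil.acquire()      # re-acquire the GIL; acquire() also runs
                            # rthread.gc_thread_run() and then the
                            # registered after-thread-switch callback
        return res
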
diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -234,9 +234,16 @@ includes = ['io.h', 'sys/utime.h', 'sys/types.h'] libraries = [] else: + if sys.platform.startswith(('darwin', 'netbsd', 'openbsd')): + _ptyh = 'util.h' + elif sys.platform.startswith('freebsd'): + _ptyh = 'libutil.h' + else: + _ptyh = 'pty.h' includes = ['unistd.h', 'sys/types.h', 'sys/wait.h', 'utime.h', 'sys/time.h', 'sys/times.h', - 'grp.h', 'dirent.h'] + 'grp.h', 'dirent.h', 'sys/stat.h', 'fcntl.h', + 'signal.h', 'sys/utsname.h', _ptyh] libraries = ['util'] eci = ExternalCompilationInfo( includes=includes, @@ -1269,7 +1276,8 @@ if not _WIN32: TMSP = lltype.Ptr(TMS) c_times = external('times', [TMSP], CLOCK_T, - save_err=rffi.RFFI_SAVE_ERRNO) + save_err=rffi.RFFI_SAVE_ERRNO | + rffi.RFFI_ZERO_ERRNO_BEFORE) # Here is a random extra platform parameter which is important. # Strictly speaking, this should probably be retrieved at runtime, not @@ -1291,7 +1299,13 @@ if not _WIN32: l_tmsbuf = lltype.malloc(TMSP.TO, flavor='raw') try: - result = handle_posix_error('times', c_times(l_tmsbuf)) + # note: times() can return a negative value (or even -1) + # even if there is no error + result = widen(c_times(l_tmsbuf)) + if result == -1: + errno = get_saved_errno() + if errno != 0: + raise OSError(errno, 'times() failed') return ( rffi.cast(lltype.Signed, l_tmsbuf.c_tms_utime) / CLOCK_TICKS_PER_SECOND, @@ -1607,7 +1621,8 @@ #___________________________________________________________________ c_chroot = external('chroot', [rffi.CCHARP], rffi.INT, - save_err=rffi.RFFI_SAVE_ERRNO) + save_err=rffi.RFFI_SAVE_ERRNO, + macro=_MACRO_ON_POSIX) @replace_os_function('chroot') def chroot(path): diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -5,11 +5,16 @@ import os, sys +from rpython.flowspace.model import Constant +from rpython.flowspace.operation import op from rpython.annotator import model as annmodel from rpython.rtyper import extregistry from rpython.tool.pairtype import pairtype from rpython.rtyper.tool import rffi_platform as platform from rpython.rtyper.llannotation import lltype_to_annotation +from rpython.rtyper.rmodel import Repr +from rpython.rtyper.rint import IntegerRepr +from rpython.rtyper.error import TyperError from rpython.rlib.objectmodel import specialize from rpython.rtyper.lltypesystem import lltype, rffi @@ -23,7 +28,7 @@ _LINUX = sys.platform.startswith('linux') if _WIN32: - from rpython.rlib import rwin32 + from rpython.rlib import rwin32 from rpython.rlib.rwin32file import make_win32_traits # Support for float times is here. 
@@ -84,8 +89,7 @@ knowntype = os.stat_result def rtyper_makerepr(self, rtyper): - from rpython.rlib import _rposix_repr - return _rposix_repr.StatResultRepr(rtyper) + return StatResultRepr(rtyper) def rtyper_makekey(self): return self.__class__, @@ -111,6 +115,83 @@ return s_reduced, stat_result_reduce, stat_result_recreate +class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)): + def getitem((s_sta, s_int)): + assert s_int.is_constant(), "os.stat()[index]: index must be constant" + index = s_int.const + assert 0 <= index < N_INDEXABLE_FIELDS, "os.stat()[index] out of range" + name, TYPE = STAT_FIELDS[index] + return lltype_to_annotation(TYPE) + + +class StatResultRepr(Repr): + + def __init__(self, rtyper): + self.rtyper = rtyper + self.stat_field_indexes = {} + for i, (name, TYPE) in enumerate(STAT_FIELDS): + self.stat_field_indexes[name] = i + + self.s_tuple = annmodel.SomeTuple( + [lltype_to_annotation(TYPE) for name, TYPE in STAT_FIELDS]) + self.r_tuple = rtyper.getrepr(self.s_tuple) + self.lowleveltype = self.r_tuple.lowleveltype + + def redispatch_getfield(self, hop, index): + rtyper = self.rtyper + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() + spaceop = op.getitem(hop.args_v[0], Constant(index)) + spaceop.result = hop.spaceop.result + hop2.spaceop = spaceop + hop2.args_v = spaceop.args + hop2.args_s = [self.s_tuple, s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + return hop2.dispatch() + + def rtype_getattr(self, hop): + s_attr = hop.args_s[1] + attr = s_attr.const + try: + index = self.stat_field_indexes[attr] + except KeyError: + raise TyperError("os.stat().%s: field not available" % (attr,)) + return self.redispatch_getfield(hop, index) + + +class __extend__(pairtype(StatResultRepr, IntegerRepr)): + def rtype_getitem((r_sta, r_int), hop): + s_int = hop.args_s[1] + index = s_int.const + return r_sta.redispatch_getfield(hop, index) + +s_StatResult = SomeStatResult() + +def make_stat_result(tup): + """Turn a tuple into an os.stat_result object.""" + positional = tuple( + lltype.cast_primitive(TYPE, value) for value, (name, TYPE) in + zip(tup, STAT_FIELDS)[:N_INDEXABLE_FIELDS]) + kwds = {} + for value, (name, TYPE) in zip(tup, STAT_FIELDS)[N_INDEXABLE_FIELDS:]: + kwds[name] = lltype.cast_primitive(TYPE, value) + return os.stat_result(positional, kwds) + + +class MakeStatResultEntry(extregistry.ExtRegistryEntry): + _about_ = make_stat_result + + def compute_result_annotation(self, s_tup): + return s_StatResult + + def specialize_call(self, hop): + r_StatResult = hop.rtyper.getrepr(s_StatResult) + [v_result] = hop.inputargs(r_StatResult.r_tuple) + # no-op conversion from r_StatResult.r_tuple to r_StatResult + hop.exception_cannot_occur() + return v_result + + class SomeStatvfsResult(annmodel.SomeObject): if hasattr(os, 'statvfs_result'): knowntype = os.statvfs_result @@ -118,8 +199,7 @@ knowntype = None # will not be used def rtyper_makerepr(self, rtyper): - from rpython.rlib import _rposix_repr - return _rposix_repr.StatvfsResultRepr(rtyper) + return StatvfsResultRepr(rtyper) def rtyper_makekey(self): return self.__class__, @@ -130,15 +210,6 @@ return lltype_to_annotation(TYPE) -class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)): - def getitem((s_sta, s_int)): - assert s_int.is_constant(), "os.stat()[index]: index must be constant" - index = s_int.const - assert 0 <= index < N_INDEXABLE_FIELDS, "os.stat()[index] out of range" - name, TYPE = STAT_FIELDS[index] - return lltype_to_annotation(TYPE) - - class 
__extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)): def getitem((s_stat, s_int)): assert s_int.is_constant() @@ -146,33 +217,55 @@ return lltype_to_annotation(TYPE) -s_StatResult = SomeStatResult() s_StatvfsResult = SomeStatvfsResult() -def make_stat_result(tup): - """Turn a tuple into an os.stat_result object.""" - positional = tup[:N_INDEXABLE_FIELDS] - kwds = {} - for i, name in enumerate(STAT_FIELD_NAMES[N_INDEXABLE_FIELDS:]): - kwds[name] = tup[N_INDEXABLE_FIELDS + i] - return os.stat_result(positional, kwds) +class StatvfsResultRepr(Repr): + def __init__(self, rtyper): + self.rtyper = rtyper + self.statvfs_field_indexes = {} + for i, (name, TYPE) in enumerate(STATVFS_FIELDS): + self.statvfs_field_indexes[name] = i + + self.s_tuple = annmodel.SomeTuple( + [lltype_to_annotation(TYPE) for name, TYPE in STATVFS_FIELDS]) + self.r_tuple = rtyper.getrepr(self.s_tuple) + self.lowleveltype = self.r_tuple.lowleveltype + + def redispatch_getfield(self, hop, index): + rtyper = self.rtyper + s_index = rtyper.annotator.bookkeeper.immutablevalue(index) + hop2 = hop.copy() + spaceop = op.getitem(hop.args_v[0], Constant(index)) + spaceop.result = hop.spaceop.result + hop2.spaceop = spaceop + hop2.args_v = spaceop.args + hop2.args_s = [self.s_tuple, s_index] + hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] + return hop2.dispatch() + + def rtype_getattr(self, hop): + s_attr = hop.args_s[1] + attr = s_attr.const + try: + index = self.statvfs_field_indexes[attr] + except KeyError: + raise TyperError("os.statvfs().%s: field not available" % (attr,)) + return self.redispatch_getfield(hop, index) + + +class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)): + def rtype_getitem((r_sta, r_int), hop): + s_int = hop.args_s[1] + index = s_int.const + return r_sta.redispatch_getfield(hop, index) def make_statvfs_result(tup): - return os.statvfs_result(tup) - - -class MakeStatResultEntry(extregistry.ExtRegistryEntry): - _about_ = make_stat_result - - def compute_result_annotation(self, s_tup): - return s_StatResult - - def specialize_call(self, hop): - from rpython.rlib import _rposix_repr - return _rposix_repr.specialize_make_stat_result(hop) - + args = tuple( + lltype.cast_primitive(TYPE, value) for value, (name, TYPE) in + zip(tup, STATVFS_FIELDS)) + return os.statvfs_result(args) class MakeStatvfsResultEntry(extregistry.ExtRegistryEntry): _about_ = make_statvfs_result @@ -181,8 +274,10 @@ return s_StatvfsResult def specialize_call(self, hop): - from rpython.rlib import _rposix_repr - return _rposix_repr.specialize_make_statvfs_result(hop) + r_StatvfsResult = hop.rtyper.getrepr(s_StatvfsResult) + [v_result] = hop.inputargs(r_StatvfsResult.r_tuple) + hop.exception_cannot_occur() + return v_result # ____________________________________________________________ # diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -79,7 +79,12 @@ @specialize.arg(0) def ll_start_new_thread(func): + from rpython.rlib import rgil _check_thread_enabled() + rgil.allocate() + # ^^^ convenience: any RPython program which uses explicitly + # rthread.start_new_thread() will initialize the GIL at that + # point. 
ident = c_thread_start(func) if ident == -1: raise error("can't start new thread") diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py --- a/rpython/rlib/rtime.py +++ b/rpython/rlib/rtime.py @@ -165,9 +165,11 @@ globals().update(rffi_platform.configure(CConfigForClockGetTime)) TIMESPEC = TIMESPEC CLOCK_PROCESS_CPUTIME_ID = CLOCK_PROCESS_CPUTIME_ID + eci_with_lrt = eci.merge(ExternalCompilationInfo(libraries=['rt'])) c_clock_gettime = external('clock_gettime', [lltype.Signed, lltype.Ptr(TIMESPEC)], - rffi.INT, releasegil=False) + rffi.INT, releasegil=False, + compilation_info=eci_with_lrt) else: RUSAGE = RUSAGE RUSAGE_SELF = RUSAGE_SELF or 0 diff --git a/rpython/rlib/test/test_rgil.py b/rpython/rlib/test/test_rgil.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rgil.py @@ -0,0 +1,47 @@ +from rpython.rlib import rgil +from rpython.translator.c.test.test_standalone import StandaloneTests + + +class BaseTestGIL(StandaloneTests): + + def test_simple(self): + def main(argv): + rgil.release() + # don't have the GIL here + rgil.acquire() + rgil.yield_thread() + print "OK" # there is also a release/acquire pair here + return 0 + + main([]) + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('') + assert data == "OK\n" + + def test_after_thread_switch(self): + class Foo: + pass + foo = Foo() + foo.counter = 0 + def seeme(): + foo.counter += 1 + def main(argv): + rgil.invoke_after_thread_switch(seeme) + print "Test" # one release/acquire pair here + print foo.counter + print foo.counter + return 0 + + t, cbuilder = self.compile(main) + data = cbuilder.cmdexec('') + assert data == "Test\n1\n2\n" + + +class TestGILAsmGcc(BaseTestGIL): + gc = 'minimark' + gcrootfinder = 'asmgcc' + +class TestGILShadowStack(BaseTestGIL): + gc = 'minimark' + gcrootfinder = 'shadowstack' diff --git a/rpython/rlib/test/test_rposix_stat.py b/rpython/rlib/test/test_rposix_stat.py --- a/rpython/rlib/test/test_rposix_stat.py +++ b/rpython/rlib/test/test_rposix_stat.py @@ -32,7 +32,11 @@ fname = udir.join('test_stat_large_number.txt') fname.ensure() t1 = 5000000000.0 - os.utime(str(fname), (t1, t1)) + try: + os.utime(str(fname), (t1, t1)) + except OverflowError: + py.test.skip("This platform doesn't support setting stat times " + "to large values") assert rposix_stat.stat(str(fname)).st_mtime == t1 @py.test.mark.skipif(not hasattr(os, 'statvfs'), diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -143,7 +143,7 @@ def test_simple_tcp(): - import thread + from rpython.rlib import rthread sock = RSocket() try_ports = [1023] + range(20000, 30000, 437) for port in try_ports: @@ -169,14 +169,14 @@ connected[0] = True finally: lock.release() - lock = thread.allocate_lock() - lock.acquire() - thread.start_new_thread(connecting, ()) + lock = rthread.allocate_lock() + lock.acquire(True) + rthread.start_new_thread(connecting, ()) print 'waiting for connection' fd1, addr2 = sock.accept() s1 = RSocket(fd=fd1) print 'connection accepted' - lock.acquire() + lock.acquire(True) assert connected[0] print 'connecting side knows that the connection was accepted too' assert addr.eq(s2.getpeername()) @@ -188,7 +188,9 @@ buf = s2.recv(100) assert buf == '?' 
print 'received ok' - thread.start_new_thread(s2.sendall, ('x'*50000,)) + def sendstuff(): + s2.sendall('x'*50000) + rthread.start_new_thread(sendstuff, ()) buf = '' while len(buf) < 50000: data = s1.recv(50100) diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -5,13 +5,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi import py -def setup_module(mod): - # Hack to avoid a deadlock if the module is run after other test files :-( - # In this module, we assume that rthread.start_new_thread() is not - # providing us with a GIL equivalent, except in test_gc_locking - # which installs its own aroundstate. - rffi.aroundstate._cleanup_() - def test_lock(): l = allocate_lock() ok1 = l.acquire(True) @@ -31,6 +24,7 @@ py.test.fail("Did not raise") def test_tlref_untranslated(): + import thread class FooBar(object): pass t = ThreadLocalReference(FooBar) @@ -43,7 +37,7 @@ time.sleep(0.2) results.append(t.get() is x) for i in range(5): - start_new_thread(subthread, ()) + thread.start_new_thread(subthread, ()) time.sleep(0.5) assert results == [True] * 15 @@ -99,7 +93,6 @@ def test_gc_locking(self): import time - from rpython.rlib.objectmodel import invoke_around_extcall from rpython.rlib.debug import ll_assert class State: @@ -123,17 +116,6 @@ ll_assert(j == self.j, "2: bad j") run._dont_inline_ = True - def before_extcall(): - release_NOAUTO(state.gil) - before_extcall._gctransformer_hint_cannot_collect_ = True - # ^^^ see comments in gil.py about this hint - - def after_extcall(): - acquire_NOAUTO(state.gil, True) - gc_thread_run() - after_extcall._gctransformer_hint_cannot_collect_ = True - # ^^^ see comments in gil.py about this hint - def bootstrap(): # after_extcall() is called before we arrive here. # We can't just acquire and release the GIL manually here, @@ -154,14 +136,9 @@ start_new_thread(bootstrap, ()) def f(): - state.gil = allocate_ll_lock() - acquire_NOAUTO(state.gil, True) state.bootstrapping = allocate_lock() state.answers = [] state.finished = 0 - # the next line installs before_extcall() and after_extcall() - # to be called automatically around external function calls. - invoke_around_extcall(before_extcall, after_extcall) g(10, 1) done = False @@ -179,10 +156,7 @@ return len(state.answers) expected = 89 - try: - fn = self.getcompiled(f, []) - finally: - rffi.aroundstate._cleanup_() + fn = self.getcompiled(f, []) answers = fn() assert answers == expected diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -169,9 +169,9 @@ argnames = ', '.join(['a%d' % i for i in range(len(args))]) source = py.code.Source(""" + from rpython.rlib import rgil def call_external_function(%(argnames)s): - before = aroundstate.before - if before: before() + rgil.release() # NB. it is essential that no exception checking occurs here! 
if %(save_err)d: from rpython.rlib import rposix @@ -180,12 +180,10 @@ if %(save_err)d: from rpython.rlib import rposix rposix._errno_after(%(save_err)d) - after = aroundstate.after - if after: after() + rgil.acquire() return res """ % locals()) - miniglobals = {'aroundstate': aroundstate, - 'funcptr': funcptr, + miniglobals = {'funcptr': funcptr, '__name__': __name__, # for module name propagation } exec source.compile() in miniglobals @@ -205,7 +203,7 @@ # don't inline, as a hack to guarantee that no GC pointer is alive # anywhere in call_external_function else: - # if we don't have to invoke the aroundstate, we can just call + # if we don't have to invoke the GIL handling, we can just call # the low-level function pointer carelessly if macro is None and save_err == RFFI_ERR_NONE: call_external_function = funcptr @@ -270,13 +268,10 @@ freeme = arg elif _isfunctype(TARGET) and not _isllptr(arg): # XXX pass additional arguments - if invoke_around_handlers: - arg = llhelper(TARGET, _make_wrapper_for(TARGET, arg, - callbackholder, - aroundstate)) - else: - arg = llhelper(TARGET, _make_wrapper_for(TARGET, arg, - callbackholder)) + use_gil = invoke_around_handlers + arg = llhelper(TARGET, _make_wrapper_for(TARGET, arg, + callbackholder, + use_gil)) else: SOURCE = lltype.typeOf(arg) if SOURCE != TARGET: @@ -315,7 +310,7 @@ def __init__(self): self.callbacks = {} -def _make_wrapper_for(TP, callable, callbackholder=None, aroundstate=None): +def _make_wrapper_for(TP, callable, callbackholder, use_gil): """ Function creating wrappers for callbacks. Note that this is cheating as we assume constant callbacks and we just memoize wrappers """ @@ -330,11 +325,13 @@ callbackholder.callbacks[callable] = True args = ', '.join(['a%d' % i for i in range(len(TP.TO.ARGS))]) source = py.code.Source(r""" + rgil = None + if use_gil: + from rpython.rlib import rgil + def wrapper(%(args)s): # no *args - no GIL for mallocing the tuple - if aroundstate is not None: - after = aroundstate.after - if after: - after() + if rgil is not None: + rgil.acquire() # from now on we hold the GIL stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -349,13 +346,11 @@ traceback.print_exc() result = errorcode stackcounter.stacks_counter -= 1 - if aroundstate is not None: - before = aroundstate.before - if before: - before() + if rgil is not None: + rgil.release() # here we don't hold the GIL any more. As in the wrapper() produced # by llexternal, it is essential that no exception checking occurs - # after the call to before(). + # after the call to rgil.release(). 
return result """ % locals()) miniglobals = locals().copy() @@ -369,13 +364,6 @@ AroundFnPtr = lltype.Ptr(lltype.FuncType([], lltype.Void)) -class AroundState: - def _cleanup_(self): - self.before = None # or a regular RPython function - self.after = None # or a regular RPython function -aroundstate = AroundState() -aroundstate._cleanup_() - class StackCounter: def _cleanup_(self): self.stacks_counter = 0 # number of "stack pieces": callbacks diff --git a/rpython/rtyper/lltypesystem/test/test_rffi.py b/rpython/rtyper/lltypesystem/test/test_rffi.py --- a/rpython/rtyper/lltypesystem/test/test_rffi.py +++ b/rpython/rtyper/lltypesystem/test/test_rffi.py @@ -688,42 +688,6 @@ assert interpret(f, []) == 4 - def test_around_extcall(self): - if sys.platform == "win32": - py.test.skip('No pipes on windows') - import os - from rpython.annotator import model as annmodel - from rpython.rlib.objectmodel import invoke_around_extcall - from rpython.rtyper.extfuncregistry import register_external - read_fd, write_fd = os.pipe() - try: - # we need an external function that is not going to get wrapped around - # before()/after() calls, in order to call it from before()/after()... - def mywrite(s): - os.write(write_fd, s) - def llimpl(s): - s = ''.join(s.chars) - os.write(write_fd, s) - register_external(mywrite, [str], annmodel.s_None, 'll_mywrite', - llfakeimpl=llimpl, sandboxsafe=True) - - def before(): - mywrite("B") - def after(): - mywrite("A") - def f(): - os.write(write_fd, "-") - invoke_around_extcall(before, after) - os.write(write_fd, "E") - - interpret(f, []) - data = os.read(read_fd, 99) - assert data == "-BEA" - - finally: - os.close(write_fd) - os.close(read_fd) - def test_external_callable(self): """ Try to call some llexternal function with llinterp """ diff --git a/rpython/translator/backendopt/test/test_malloc.py b/rpython/translator/backendopt/test/test_malloc.py --- a/rpython/translator/backendopt/test/test_malloc.py +++ b/rpython/translator/backendopt/test/test_malloc.py @@ -159,7 +159,7 @@ def __del__(self): delcalls[0] += 1 - os.write(1, "__del__\n") + #os.write(1, "__del__\n") def f(x=int): a = A() diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -33,6 +33,10 @@ # include #endif +#ifdef RPY_WITH_GIL +# include +#endif + RPY_EXTERN int pypy_main_function(int argc, char *argv[]) @@ -46,6 +50,14 @@ _setmode(1, _O_BINARY); #endif +#ifdef RPY_WITH_GIL + /* Note that the GIL's mutexes are not automatically made; if the + program starts threads, it needs to call rgil.gil_allocate(). + RPyGilAcquire() still works without that, but crash if it finds + that it really needs to wait on a mutex. 
*/ + RPyGilAcquire(); +#endif + #ifdef PYPY_USE_ASMGCC pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; #endif @@ -82,6 +94,10 @@ pypy_malloc_counters_results(); +#ifdef RPY_WITH_GIL + RPyGilRelease(); +#endif + return exitcode; memory_out: diff --git a/rpython/translator/c/src/mem.c b/rpython/translator/c/src/mem.c --- a/rpython/translator/c/src/mem.c +++ b/rpython/translator/c/src/mem.c @@ -120,11 +120,8 @@ got += 1; fd = ((void* *) (((char *)fd) + sizeof(void*)))[0]; } - if (rpy_fastgil != 1) { - RPyAssert(rpy_fastgil != 0, - "pypy_check_stack_count doesn't have the GIL"); - got++; /* <= the extra one currently stored in rpy_fastgil */ - } + RPyAssert(rpy_fastgil == 1, + "pypy_check_stack_count doesn't have the GIL"); RPyAssert(got == stacks_counter - 1, "bad stacks_counter or non-closed stacks around"); # endif diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h --- a/rpython/translator/c/src/thread.h +++ b/rpython/translator/c/src/thread.h @@ -28,7 +28,8 @@ RPY_EXTERN void RPyGilAllocate(void); RPY_EXTERN long RPyGilYieldThread(void); -RPY_EXTERN void RPyGilAcquire(void); +RPY_EXTERN void RPyGilAcquireSlowPath(long); From pypy.commits at gmail.com Fri Dec 18 10:52:07 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 07:52:07 -0800 (PST) Subject: [pypy-commit] pypy default: Skip this test on 32-bit Message-ID: <56742ba7.ca881c0a.8e72e.1d1f@mx.google.com> Author: Armin Rigo Branch: Changeset: r81377:1cc0914334cd Date: 2015-12-18 15:51 +0000 http://bitbucket.org/pypy/pypy/changeset/1cc0914334cd/ Log: Skip this test on 32-bit diff --git a/pypy/module/_file/test/test_large_file.py b/pypy/module/_file/test/test_large_file.py --- a/pypy/module/_file/test/test_large_file.py +++ b/pypy/module/_file/test/test_large_file.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.module._file.test.test_file import getfile @@ -13,6 +13,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): from rpython.translator.c.test.test_extfunc import need_sparse_files + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_large_seek_offsets(self): From pypy.commits at gmail.com Fri Dec 18 10:56:48 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 07:56:48 -0800 (PST) Subject: [pypy-commit] pypy default: Same as 1cc0914334cd Message-ID: <56742cc0.42ddc20a.f9258.04fb@mx.google.com> Author: Armin Rigo Branch: Changeset: r81378:b37b96b20438 Date: 2015-12-18 15:56 +0000 http://bitbucket.org/pypy/pypy/changeset/b37b96b20438/ Log: Same as 1cc0914334cd diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -1,6 +1,6 @@ from __future__ import with_statement from rpython.tool.udir import udir -import os +import os, sys, py class AppTestMMap: spaceconfig = dict(usemodules=('mmap',)) @@ -8,6 +8,15 @@ def setup_class(cls): cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-'))) + def setup_method(self, meth): + if getattr(meth, 'is_large', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a 
real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") + def test_page_size(self): import mmap assert mmap.PAGESIZE > 0 @@ -648,6 +657,7 @@ assert m[0xFFFFFFF] == b'A' finally: m.close() + test_large_offset.is_large = True def test_large_filesize(self): import mmap @@ -665,6 +675,7 @@ assert m.size() == 0x180000000 finally: m.close() + test_large_filesize.is_large = True def test_all(self): # this is a global test, ported from test_mmap.py From pypy.commits at gmail.com Fri Dec 18 11:00:28 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 08:00:28 -0800 (PST) Subject: [pypy-commit] pypy default: Same as 1cc0914334cd Message-ID: <56742d9c.d4e41c0a.f5298.ffffa000@mx.google.com> Author: Armin Rigo Branch: Changeset: r81379:364ca56e0027 Date: 2015-12-18 15:59 +0000 http://bitbucket.org/pypy/pypy/changeset/364ca56e0027/ Log: Same as 1cc0914334cd diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -93,6 +93,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_posix_is_pypy_s(self): From pypy.commits at gmail.com Fri Dec 18 11:48:02 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 08:48:02 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <567438c2.6650c20a.a11d6.ffffbb72@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r680:72fceeba1ecb Date: 2015-12-18 17:47 +0100 http://bitbucket.org/pypy/pypy.org/changeset/72fceeba1ecb/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $61605 of $105000 (58.7%) + $61615 of $105000 (58.7%)
    @@ -23,7 +23,7 @@
  • diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -15,7 +15,7 @@ - $52728 of $60000 (87.9%) + $52747 of $60000 (87.9%)
    @@ -23,7 +23,7 @@
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $30224 of $80000 (37.8%) + $30234 of $80000 (37.8%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Fri Dec 18 11:49:56 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 08:49:56 -0800 (PST) Subject: [pypy-commit] pypy default: Before resetting SIGALRM, use alarm(0) to make sure pending alarms are cancelled Message-ID: <56743934.6a69c20a.d649d.ffffae86@mx.google.com> Author: Armin Rigo Branch: Changeset: r81380:10def85dd7bc Date: 2015-12-18 17:49 +0100 http://bitbucket.org/pypy/pypy/changeset/10def85dd7bc/ Log: Before resetting SIGALRM, use alarm(0) to make sure pending alarms are cancelled diff --git a/rpython/rlib/test/test_streamio.py b/rpython/rlib/test/test_streamio.py --- a/rpython/rlib/test/test_streamio.py +++ b/rpython/rlib/test/test_streamio.py @@ -1077,6 +1077,7 @@ alarm(1) assert file.read(10) == "hello" finally: + alarm(0) signal(SIGALRM, SIG_DFL) def test_write_interrupted(self): @@ -1102,6 +1103,7 @@ # can succeed. file.write("hello") finally: + alarm(0) signal(SIGALRM, SIG_DFL) def test_append_mode(self): From pypy.commits at gmail.com Fri Dec 18 12:33:51 2015 From: pypy.commits at gmail.com (rlamy) Date: Fri, 18 Dec 2015 09:33:51 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Replace FuncCode.funcgens list with single .funcgen, since there is never more than one Message-ID: <5674437f.2269c20a.7eae7.ffffbee5@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81381:70a2702ce64e Date: 2015-12-18 16:33 +0100 http://bitbucket.org/pypy/pypy/changeset/70a2702ce64e/ Log: Replace FuncCode.funcgens list with single .funcgen, since there is never more than one diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -815,39 +815,38 @@ return self.name def make_funcgens(self): - self.funcgens = select_function_code_generators(self.obj, self.db, self.name) - if self.funcgens: - argnames = self.funcgens[0].argnames() #Assume identical for all funcgens + self.funcgen = select_function_code_generators(self.obj, self.db, self.name) + if self.funcgen: + argnames = self.funcgen.argnames() self.implementationtypename = self.db.gettype(self.T, argnames=argnames) - self._funccodegen_owner = self.funcgens[0] - else: - self._funccodegen_owner = None + + self._funccodegen_owner = self.funcgen def basename(self): return self.obj._name def enum_dependencies(self): - if not self.funcgens: + if self.funcgen is None: return [] - return self.funcgens[0].allconstantvalues() #Assume identical for all funcgens + return self.funcgen.allconstantvalues() def forward_declaration(self): callable = getattr(self.obj, '_callable', None) is_exported = getattr(callable, 'exported_symbol', False) - for funcgen in self.funcgens: + if self.funcgen: yield '%s;' % ( forward_cdecl(self.implementationtypename, - funcgen.name(self.name), self.db.standalone, + self.funcgen.name(self.name), self.db.standalone, is_exported=is_exported)) def implementation(self): - for funcgen in self.funcgens: - for s in self.funcgen_implementation(funcgen): + if self.funcgen: + for s in self.funcgen_implementation(self.funcgen): yield s def graphs_to_patch(self): - for funcgen in self.funcgens: - for i in funcgen.graphs_to_patch(): + if self.funcgen: + for i in self.funcgen.graphs_to_patch(): yield i def funcgen_implementation(self, funcgen): @@ -899,12 +898,11 @@ def sandbox_stub(fnobj, db): # unexpected external function for --sandbox translation: replace it - # with a "Not Implemented" stub. To support these functions, port them - # to the new style registry (e.g. 
rpython.module.ll_os.RegisterOs). + # with a "Not Implemented" stub. from rpython.translator.sandbox import rsandbox graph = rsandbox.get_external_function_sandbox_graph(fnobj, db, force_stub=True) - return [FunctionCodeGenerator(graph, db)] + return FunctionCodeGenerator(graph, db) def sandbox_transform(fnobj, db): # for --sandbox: replace a function like os_open_llimpl() with @@ -912,7 +910,7 @@ # perform the operation. from rpython.translator.sandbox import rsandbox graph = rsandbox.get_external_function_sandbox_graph(fnobj, db) - return [FunctionCodeGenerator(graph, db)] + return FunctionCodeGenerator(graph, db) def select_function_code_generators(fnobj, db, functionname): sandbox = db.need_sandboxing(fnobj) @@ -921,16 +919,16 @@ # apply the sandbox transformation return sandbox_transform(fnobj, db) exception_policy = getattr(fnobj, 'exception_policy', None) - return [FunctionCodeGenerator(fnobj.graph, db, exception_policy, - functionname)] + return FunctionCodeGenerator( + fnobj.graph, db, exception_policy, functionname) elif getattr(fnobj, 'external', None) is not None: if sandbox: return sandbox_stub(fnobj, db) else: assert fnobj.external == 'C' - return [] + return None elif hasattr(fnobj._callable, "c_name"): - return [] # this case should only be used for entrypoints + return None # this case should only be used for entrypoints else: raise ValueError("don't know how to generate code for %r" % (fnobj,)) From pypy.commits at gmail.com Fri Dec 18 12:43:44 2015 From: pypy.commits at gmail.com (rlamy) Date: Fri, 18 Dec 2015 09:43:44 -0800 (PST) Subject: [pypy-commit] pypy exctrans: remove dead import Message-ID: <567445d0.a415c20a.4d933.ffffcae2@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81382:dd8a27331105 Date: 2015-12-18 18:42 +0100 http://bitbucket.org/pypy/pypy/changeset/dd8a27331105/ Log: remove dead import diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -4,7 +4,6 @@ _subarray) from rpython.rtyper.lltypesystem import llmemory, llgroup from rpython.translator.c.funcgen import FunctionCodeGenerator -from rpython.translator.c.external import CExternalFunctionCodeGenerator from rpython.translator.c.support import USESLOTS # set to False if necessary while refactoring from rpython.translator.c.support import cdecl, forward_cdecl, somelettersfrom from rpython.translator.c.support import c_char_array_constant, barebonearray From pypy.commits at gmail.com Fri Dec 18 13:57:00 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 10:57:00 -0800 (PST) Subject: [pypy-commit] pypy fix-2198: Write more tests (that are passing) and refactor a little bit setslice() Message-ID: <567456fc.a658c20a.49ee6.ffffe975@mx.google.com> Author: Armin Rigo Branch: fix-2198 Changeset: r81383:83cd07a3e10e Date: 2015-12-18 19:51 +0100 http://bitbucket.org/pypy/pypy/changeset/83cd07a3e10e/ Log: Write more tests (that are passing) and refactor a little bit setslice() with the JIT in mind and some more comments about corner cases diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1468,47 +1468,47 @@ return len2 = w_other.length() - if slicelength == 0 and len2 == 0: - return # shortcut, really there's nothing to do items = self.unerase(w_list.lstorage) if step == 1: # Support list resizing for non-extended slices - len1 = w_list.length() + len1 = len(items) # Ensure non-negative 
slicing - if start <= -len1: - start = 0 - elif start < 0: + if start < 0: start += len1 + if start < 0: + start = 0 assert start >= 0 - if len2 == 0 and slicelength > 0: # shortcut, we already did all that was needed + if len2 == 0: # shortcut, we already did all that was needed + # (this condition is needed to avoid self.unerase() on + # w_other in the next line: if len2 == 0 then it might + # be any strategy; only if it is not 0 do we check that + # it is the same strategy as 'self') del items[start:start + slicelength] else: items[start:start + slicelength] = self.unerase(w_other.lstorage) return - elif len2 != slicelength: # No resize for extended slices + + # The rest is the case 'step != 1' + + if len2 != slicelength: # No resize for extended slices raise oefmt(self.space.w_ValueError, "attempt to assign sequence of size %d to extended " "slice of size %d", len2, slicelength) - if len2 == 0: - other_items = [] - else: - # at this point both w_list and w_other have the same type, so - # self.unerase is valid for both of them - other_items = self.unerase(w_other.lstorage) + if len2 == 0: # == slicelength: shortcut, really there's nothing to do + return + # at this point both w_list and w_other have the same type, so + # self.unerase is valid for both of them + other_items = self.unerase(w_other.lstorage) if other_items is items: - if step > 1: - # Always copy starting from the right to avoid - # having to make a shallow copy in the case where - # the source and destination lists are the same list. - i = len2 - 1 - start += i * step - while i >= 0: - items[start] = other_items[i] - start -= step - i -= 1 - else: # step can only be -1 here, so it's equivalent to : - assert step == -1 - w_list.reverse() + # 'l[x:y:step] = l', with step != 1 and len(l) != 0. + + # This is very obscure, but w_list.reverse() is correct + # here for all remaining cases, I believe. It handles the + # case 'l[::-1] = l'. Otherwise, if abs(step) > 1 but still + # len(l) == len2 == slicelength, then the only possibility + # left is that len(l) == 1, and reverse() has no effect. + assert step == -1 or len2 == 1 + w_list.reverse() return for i in range(len2): items[start] = other_items[i] diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1079,6 +1079,21 @@ b[i:i+1] = ['y'] assert b == ['y'] * count + def test_setslice_corner_case_1(self): + lst = [5, 6, 7, 8] + lst[2:3] = range(2) + assert lst == [5, 6, 0, 1, 8] + + def test_setslice_corner_case_2(self): + lst = [5] + lst[0:-10:-10] = lst + assert lst == [5] + + def test_setslice_corner_case_3(self): + lst = [5] + lst[0:10:10] = lst + assert lst == [5] + def test_recursive_repr(self): l = [] assert repr(l) == '[]' From pypy.commits at gmail.com Fri Dec 18 14:28:42 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 11:28:42 -0800 (PST) Subject: [pypy-commit] pypy fix-2198: Two tests for the two cases of ll_listsetslice() which resizes the lists. Message-ID: <56745e6a.42ddc20a.f9258.5687@mx.google.com> Author: Armin Rigo Branch: fix-2198 Changeset: r81384:8db9bd041275 Date: 2015-12-18 20:27 +0100 http://bitbucket.org/pypy/pypy/changeset/8db9bd041275/ Log: Two tests for the two cases of ll_listsetslice() which resizes the lists. 
Both fail :-( diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -136,6 +136,27 @@ del expected[start:stop] self.check_list(l, expected) + def test_rlist_setslice_resizing(self): + for start in range(5): + for stop in range(start, 5): + for len2 in range(5): + l1 = self.sample_list() # [42, 43, 44, 45] + l2 = self.sample_list() + ll_listdelslice_startonly(l2, len2) # initial slice + ll_listsetslice(l1, start, stop, l2) + expected = [42, 43, 44, 45] + expected[start:stop] = [42, 43, 44, 45][:len2] + self.check_list(l1, expected) + + def test_rlist_setslice_overlapping(self): + for start in range(5): + for stop in range(start, 5): + l1 = self.sample_list() # [42, 43, 44, 45] + ll_listsetslice(l1, start, stop, l1) # l1 twice! + expected = [42, 43, 44, 45] + expected[start:stop] = [42, 43, 44, 45] + self.check_list(l1, expected) + class TestFixedSizeListImpl(BaseTestListImpl): def sample_list(self): # [42, 43, 44, 45] From pypy.commits at gmail.com Fri Dec 18 15:07:29 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 12:07:29 -0800 (PST) Subject: [pypy-commit] pypy fix-2198: Fix the tests (writing tons of comments) Message-ID: <56746781.a658c20a.49ee6.0166@mx.google.com> Author: Armin Rigo Branch: fix-2198 Changeset: r81385:e9ae1b34a1f6 Date: 2015-12-18 21:06 +0100 http://bitbucket.org/pypy/pypy/changeset/e9ae1b34a1f6/ Log: Fix the tests (writing tons of comments) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -271,11 +271,16 @@ copy_item(source, dest, source_start, dest_start) return - # supports non-overlapping copies only + # supports non-overlapping copies only, or as a very special case, + # copies that are identical and so don't need to do anything + # (this is needed to make one case of ll_listsetslice() easier) + # xxx I suppose that the C function memcpy(p,q,r), in practice, is + # always fine and doing nothing if called with p == q... if not we_are_translated(): if source == dest: assert (source_start + length <= dest_start or - dest_start + length <= source_start) + dest_start + length <= source_start or + source_start == dest_start) TP = lltype.typeOf(source).TO assert TP == lltype.typeOf(dest).TO diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -968,22 +968,52 @@ len1 = l1.ll_length() len2 = l2.ll_length() ll_assert(start >= 0, "l[start:x] = l with unexpectedly negative start") - ll_assert(start <= len1, "l[start:x] = l with start > len(l)") ll_assert(stop <= len1, "stop cannot be past the end of l1") - if len2 == stop - start: + ll_assert(start <= stop, "l[start:stop] with start > stop") + len_replace = stop - start + if len2 == len_replace: ll_arraycopy(l2, l1, 0, start, len2) - elif len2 < stop - start: + else: + _ll_listsetslice_resize(l1, start, len_replace, l2) + +def _ll_listsetslice_resize(l1, start, len_replace, l2): + # a separate function, so that ll_listsetslice() can be JITted + len1 = l1.ll_length() + len2 = l2.ll_length() + delta = len2 - len_replace + # + if delta < 0: # len2 < len_replace ll_arraycopy(l2, l1, 0, start, len2) - ll_arraycopy(l1, l1, stop, start + len2, len1 - stop) - l1._ll_resize_le(len1 + len2 - (stop - start)) - else: # len2 > stop - start: + # Shift the items left from l1[start+len_replace:] to l1[start+len2:]. + # Usually the ranges overlap, so can't use ll_arraycopy. 
Instead + # we will proceed item-by-item from left to right. 'j' is the + # source item to copy. + j = start + len_replace + while j < len1: + l1.ll_setitem_fast(j + delta, l1.ll_getitem_fast(j)) + j += 1 + l1._ll_resize_le(len1 + delta) # this is < len1 + # + else: # len2 > len_replace: try: - newlength = ovfcheck(len1 + len2) + newlength = ovfcheck(len1 + delta) except OverflowError: raise MemoryError l1._ll_resize_ge(newlength) - ll_arraycopy(l1, l1, stop, start + len2, len1 - stop) - ll_arraycopy(l2, l1, 0, start, len2) + # Shift the items right from l1[start+len_replace:] to l1[start+len2:]. + # Usually the ranges overlap, so can't use ll_arraycopy. Instead + # we will proceed item-by-item from right to left. Here, 'j' is + # the target position to fill. + j_min = start + len2 + j = newlength - 1 + while j >= j_min: + l1.ll_setitem_fast(j, l1.ll_getitem_fast(j - delta)) + j -= 1 + # We could usually use ll_arraycopy() for the rest, but not if + # l1 == l2... so instead we just continue to copy item-by-item. + while j >= start: + l1.ll_setitem_fast(j, l2.ll_getitem_fast(j - start)) + j -= 1 # ____________________________________________________________ diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -92,14 +92,14 @@ class TestListImpl(BaseTestListImpl): - def sample_list(self): # [42, 43, 44, 45] + def sample_list(self, factor=1): # [42, 43, 44, 45] rlist = ListRepr(None, signed_repr) rlist.setup() l = ll_newlist(rlist.lowleveltype.TO, 3) - ll_setitem(l, 0, 42) - ll_setitem(l, -2, 43) - ll_setitem_nonneg(l, 2, 44) - ll_append(l, 45) + ll_setitem(l, 0, 42 * factor) + ll_setitem(l, -2, 43 * factor) + ll_setitem_nonneg(l, 2, 44 * factor) + ll_append(l, 45 * factor) return l def test_rlist_del(self): @@ -141,11 +141,11 @@ for stop in range(start, 5): for len2 in range(5): l1 = self.sample_list() # [42, 43, 44, 45] - l2 = self.sample_list() + l2 = self.sample_list(10) # [420, 430, 440, 450] ll_listdelslice_startonly(l2, len2) # initial slice ll_listsetslice(l1, start, stop, l2) expected = [42, 43, 44, 45] - expected[start:stop] = [42, 43, 44, 45][:len2] + expected[start:stop] = [420, 430, 440, 450][:len2] self.check_list(l1, expected) def test_rlist_setslice_overlapping(self): From pypy.commits at gmail.com Fri Dec 18 15:07:44 2015 From: pypy.commits at gmail.com (fijal) Date: Fri, 18 Dec 2015 12:07:44 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: merge; Message-ID: <56746790.4a5ec20a.af75e.fffff736@mx.google.com> Author: fijal Branch: cpyext-ext Changeset: r81387:8ec857f8797e Date: 2015-12-18 22:06 +0200 http://bitbucket.org/pypy/pypy/changeset/8ec857f8797e/ Log: merge; diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1074,8 +1074,7 @@ import pypy.module.cpyext.ndarrayobject global GLOBALS, SYMBOLS_C, separate_module_files GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") - SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS', - '_PyArray_CopyInto'] + SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS'] separate_module_files.append(source_dir / "ndarrayobject.c") return use_micronumpy diff --git a/pypy/module/cpyext/include/numpy/arrayobject.h b/pypy/module/cpyext/include/numpy/arrayobject.h --- a/pypy/module/cpyext/include/numpy/arrayobject.h +++ b/pypy/module/cpyext/include/numpy/arrayobject.h @@ -197,11 +197,9 
@@ PyAPI_FUNC(void) _PyArray_FILLWBYTE(PyObject* obj, int val); PyAPI_FUNC(PyObject *) _PyArray_ZEROS(int nd, npy_intp* dims, int type_num, int fortran); -PyAPI_FUNC(int) _PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src); #define PyArray_FILLWBYTE _PyArray_FILLWBYTE #define PyArray_ZEROS _PyArray_ZEROS -#define PyArray_CopyInto _PyArray_CopyInto #define PyArray_Resize(self, newshape, refcheck, fortran) (NULL) diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -170,13 +170,13 @@ return w_array @cpython_api([Py_ssize_t], PyObject) -def _PyArray_DescrFromType(space, typenum): +def PyArray_DescrFromType(space, typenum): try: dtype = get_dtype_cache(space).dtypes_by_num[typenum] return dtype except KeyError: raise OperationError(space.w_ValueError, space.wrap( - '_PyArray_DescrFromType called with invalid dtype %d' % typenum)) + 'PyArray_DescrFromType called with invalid dtype %d' % typenum)) @cpython_api([PyObject, Py_ssize_t, Py_ssize_t, Py_ssize_t], PyObject) def _PyArray_FromObject(space, w_obj, typenum, min_depth, max_depth): @@ -250,6 +250,16 @@ return simple_new(space, nd, dims, typenum, order=order, owning=owning, w_subtype=w_subtype) + at cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) +def PyArray_CopyInto(space, w_dest, w_src): + assert isinstance(w_dest, W_NDimArray) + assert isinstance(w_src, W_NDimArray) + space.appexec([w_dest, w_src], """(dest, src): + dest[:] = src + """ ) + return 0 + + gufunctype = lltype.Ptr(ufuncs.GenericUfunc) # XXX single rffi.CArrayPtr(gufunctype) does not work, this does, is there # a problem with casting function pointers? diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -16,10 +16,3 @@ return arr; } -int -_PyArray_CopyInto(PyArrayObject* dest, PyArrayObject* src) -{ - memcpy(PyArray_DATA(dest), PyArray_DATA(src), PyArray_NBYTES(dest)); - return 0; -} - diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -47,7 +47,7 @@ assert 'PyModule_Check' in api.FUNCTIONS assert api.FUNCTIONS['PyModule_Check'].argtypes == [api.PyObject] -def compile_extension_module(space, modname, **kwds): +def compile_extension_module(space, modname, include_dirs=[], **kwds): """ Build an extension module and return the filename of the resulting native code file. @@ -73,11 +73,11 @@ else: kwds["link_files"] = [str(api_library + '.so')] if sys.platform.startswith('linux'): - kwds["compile_extra"]=["-Werror=implicit-function-declaration"] + kwds["compile_extra"]=["-g", "-Werror=implicit-function-declaration"] modname = modname.split('.')[-1] eci = ExternalCompilationInfo( - include_dirs=api.include_dirs, + include_dirs=api.include_dirs + include_dirs, **kwds ) eci = eci.convert_sources_to_files() @@ -91,7 +91,7 @@ soname.rename(pydname) return str(pydname) -def compile_extension_module_applevel(space, modname, **kwds): +def compile_extension_module_applevel(space, modname, include_dirs=[], **kwds): """ Build an extension module and return the filename of the resulting native code file. 
@@ -107,11 +107,11 @@ elif sys.platform == 'darwin': pass elif sys.platform.startswith('linux'): - kwds["compile_extra"]=["-Werror=implicit-function-declaration"] + kwds["compile_extra"]=["-g","-Werror=implicit-function-declaration"] modname = modname.split('.')[-1] eci = ExternalCompilationInfo( - include_dirs = [space.include_dir], + include_dirs = [space.include_dir] + include_dirs, **kwds ) eci = eci.convert_sources_to_files() @@ -277,10 +277,10 @@ return space.wrap(pydname) @gateway.unwrap_spec(name=str, init='str_or_None', body=str, - load_it=bool, filename='str_or_None', + load_it=bool, filename='str_or_None', PY_SSIZE_T_CLEAN=bool) - def import_module(space, name, init=None, body='', - load_it=True, filename=None, + def import_module(space, name, init=None, body='', load_it=True, + filename=None, w_include_dirs=None, PY_SSIZE_T_CLEAN=False): """ init specifies the overall template of the module. @@ -291,6 +291,10 @@ if filename is None, the module name will be used to construct the filename. """ + if w_include_dirs is None: + include_dirs = [] + else: + include_dirs = [space.str_w(s) for s in space.listview(w_include_dirs)] if init is not None: code = """ %(PY_SSIZE_T_CLEAN)s @@ -317,7 +321,7 @@ filename = py.path.local(pypydir) / 'module' \ / 'cpyext'/ 'test' / (filename + ".c") kwds = dict(separate_module_files=[filename]) - + kwds['include_dirs'] = include_dirs mod = self.compile_extension_module(space, name, **kwds) if load_it: @@ -340,9 +344,10 @@ space.sys.get('modules'), space.wrap(name)) - @gateway.unwrap_spec(modname=str, prologue=str, more_init=str, PY_SSIZE_T_CLEAN=bool) + @gateway.unwrap_spec(modname=str, prologue=str, + more_init=str, PY_SSIZE_T_CLEAN=bool) def import_extension(space, modname, w_functions, prologue="", - more_init="", PY_SSIZE_T_CLEAN=False): + w_include_dirs=None, more_init="", PY_SSIZE_T_CLEAN=False): functions = space.unwrap(w_functions) methods_table = [] codes = [] @@ -368,6 +373,7 @@ if more_init: init += more_init return import_module(space, name=modname, init=init, body=body, + w_include_dirs=w_include_dirs, PY_SSIZE_T_CLEAN=PY_SSIZE_T_CLEAN) @gateway.unwrap_spec(name=str) @@ -388,12 +394,23 @@ def interp2app(func): from distutils.sysconfig import get_python_inc class FakeSpace(object): + def passthrough(self, arg): + return arg + listview = passthrough + str_w = passthrough def unwrap(self, args): - return args + try: + return args.str_w(None) + except: + return args fake = FakeSpace() fake.include_dir = get_python_inc() fake.config = self.space.config def run(*args, **kwargs): + for k in kwargs.keys(): + if k not in func.unwrap_spec and not k.startswith('w_'): + v = kwargs.pop(k) + kwargs['w_' + k] = v return func(fake, *args, **kwargs) return run def wrap(func): diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -226,11 +226,19 @@ ''' class AppTestNDArray(AppTestCpythonExtensionBase): - if self.runappdirect: - try: - import numpy - except: - skip('numpy not importable') + + def setup_class(cls): + AppTestCpythonExtensionBase.setup_class.im_func(cls) + if cls.runappdirect: + try: + import numpy + cls.w_numpy_include = [numpy.get_include()] + except: + skip('numpy not importable') + else: + cls.w_numpy_include = cls.space.wrap([]) + + def test_ndarray_object_c(self): mod = self.import_extension('foo', [ ("test_simplenew", "METH_NOARGS", @@ -244,7 +252,7 @@ ''' npy_intp dims[2] ={2, 
3}; PyObject * obj = PyArray_SimpleNew(2, dims, 1); - PyArray_FILLWBYTE(obj, 42); + PyArray_FILLWBYTE((PyArrayObject*)obj, 42); return obj; ''' ), @@ -252,20 +260,27 @@ ''' npy_intp dims1[2] ={2, 3}; npy_intp dims2[2] ={3, 2}; + int ok; PyObject * obj1 = PyArray_ZEROS(2, dims1, 11, 0); PyObject * obj2 = PyArray_ZEROS(2, dims2, 11, 0); - PyArray_FILLWBYTE(obj2, 42); - PyArray_CopyInto(obj2, obj1); - Py_DECREF(obj1); - return obj2; + PyArray_FILLWBYTE((PyArrayObject*)obj2, 42); + ok = PyArray_CopyInto((PyArrayObject*)obj2, (PyArrayObject*)obj1); + Py_DECREF(obj2); + if (ok < 0) + { + /* Should have failed */ + Py_DECREF(obj1); + return NULL; + } + return obj1; ''' ), ("test_FromAny", "METH_NOARGS", ''' npy_intp dims[2] ={2, 3}; PyObject * obj2, * obj1 = PyArray_SimpleNew(2, dims, 1); - PyArray_FILLWBYTE(obj1, 42); - obj2 = _PyArray_FromAny(obj1, NULL, 0, 0, 0, NULL); + PyArray_FILLWBYTE((PyArrayObject*)obj1, 42); + obj2 = PyArray_FromAny(obj1, NULL, 0, 0, 0, NULL); Py_DECREF(obj1); return obj2; ''' @@ -274,8 +289,8 @@ ''' npy_intp dims[2] ={2, 3}; PyObject * obj2, * obj1 = PyArray_SimpleNew(2, dims, 1); - PyArray_FILLWBYTE(obj1, 42); - obj2 = _PyArray_FromObject(obj1, 12, 0, 0); + PyArray_FILLWBYTE((PyArrayObject*)obj1, 42); + obj2 = PyArray_FromObject(obj1, 12, 0, 0); Py_DECREF(obj1); return obj2; ''' @@ -283,10 +298,19 @@ ("test_DescrFromType", "METH_O", """ Signed typenum = PyInt_AsLong(args); - return _PyArray_DescrFromType(typenum); + return PyArray_DescrFromType(typenum); """ ), - ], prologue='#include ') + ], include_dirs=self.numpy_include, + prologue=''' + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + ''', + more_init = ''' + #ifndef PYPY_VER + import_array(); + #endif + ''') arr = mod.test_simplenew() assert arr.shape == (2, 3) assert arr.dtype.num == 11 #float32 dtype @@ -294,17 +318,18 @@ assert arr.shape == (2, 3) assert arr.dtype.num == 1 #int8 dtype assert (arr == 42).all() - arr = mod.test_copy() - assert (arr == 0).all() + raises(ValueError, mod.test_copy) #Make sure these work without errors arr = mod.test_FromAny() arr = mod.test_FromObject() dt = mod.test_DescrFromType(11) assert dt.num == 11 - def test_pass_ndarray_object_to_c(self): - from _numpypy.multiarray import ndarray + if self.runappdirect: + from numpy import ndarray + else: + from _numpypy.multiarray import ndarray mod = self.import_extension('foo', [ ("check_array", "METH_VARARGS", ''' @@ -314,13 +339,25 @@ Py_INCREF(obj); return obj; '''), - ], prologue='#include ') + ], include_dirs=self.numpy_include, + prologue=''' + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + ''', + more_init = ''' + #ifndef PYPY_VER + import_array(); + #endif + ''') array = ndarray((3, 4), dtype='d') assert mod.check_array(array) is array raises(TypeError, "mod.check_array(42)") def test_ufunc(self): - from _numpypy.multiarray import arange + if self.runappdirect: + py.test.xfail('why does this segfault on cpython?') + else: + from _numpypy.multiarray import arange mod = self.import_extension('foo', [ ("create_ufunc_basic", "METH_NOARGS", """ @@ -356,9 +393,13 @@ "a ufunc that tests a more complicated signature", 0, "(m,m)->(m,m)"); """), - ], prologue=''' - #include "numpy/ndarraytypes.h" - /*#include generated by numpy setup.py*/ + ], include_dirs=self.numpy_include, + prologue=''' + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + #ifndef PYPY_VERSION + #include /*generated by numpy setup.py*/ + #endif typedef void (*PyUFuncGenericFunction) (char **args, npy_intp *dimensions, @@ -423,6 
+464,10 @@ *((float *)args[1]) = res; }; + ''', more_init = ''' + #ifndef PYPY_VER + import_array(); + #endif ''') sq = arange(18, dtype="float32").reshape(2,3,3) float_ufunc = mod.create_float_ufunc_3x3() From pypy.commits at gmail.com Fri Dec 18 15:07:43 2015 From: pypy.commits at gmail.com (fijal) Date: Fri, 18 Dec 2015 12:07:43 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: do the same thing, but more in line with what cpython does Message-ID: <5674678f.6a69c20a.d649d.fffff8ee@mx.google.com> Author: fijal Branch: cpyext-ext Changeset: r81386:911a6c69a1c9 Date: 2015-12-18 22:05 +0200 http://bitbucket.org/pypy/pypy/changeset/911a6c69a1c9/ Log: do the same thing, but more in line with what cpython does diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -178,7 +178,8 @@ iteration ends when the sequence raises IndexError for the subscripting operation. """ - return space.iter(w_seq) + # XXX check for bad internal call + return space.newseqiter(w_seq) @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PySequence_SetItem(space, w_o, i, w_v): From pypy.commits at gmail.com Fri Dec 18 15:27:26 2015 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Dec 2015 12:27:26 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56746c2e.25edc20a.49773.0d7e@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r681:c3696d3cdaff Date: 2015-12-18 21:27 +0100 http://bitbucket.org/pypy/pypy.org/changeset/c3696d3cdaff/ Log: update the values diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $30234 of $80000 (37.8%) + $30294 of $80000 (37.9%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Fri Dec 18 20:56:11 2015 From: pypy.commits at gmail.com (rlamy) Date: Fri, 18 Dec 2015 17:56:11 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Wrap FunctionCodeGenerator constructor in a factory function Message-ID: <5674b93b.d6601c0a.fa6f0.47bb@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81388:c5365dc26aa4 Date: 2015-12-19 02:55 +0100 http://bitbucket.org/pypy/pypy/changeset/c5365dc26aa4/ Log: Wrap FunctionCodeGenerator constructor in a factory function + Move some of its side-effecting code there diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -18,6 +18,16 @@ KEEP_INLINED_GRAPHS = False +def make_funcgen(graph, db, exception_policy=None, functionname=None): + graph._seen_by_the_backend = True + # apply the exception transformation + if db.exctransformer: + db.exctransformer.create_exception_handling(graph) + # apply the gc transformation + if db.gctransformer: + db.gctransformer.transform_graph(graph) + return FunctionCodeGenerator(graph, db, exception_policy, functionname) + class FunctionCodeGenerator(object): """ Collects information about a function which we have to generate @@ -25,21 +35,13 @@ """ def __init__(self, graph, db, exception_policy=None, functionname=None): - graph._seen_by_the_backend = True self.graph = graph self.db = db self.gcpolicy = db.gcpolicy self.exception_policy = exception_policy self.functionname = functionname - # apply the exception transformation - if self.db.exctransformer: - self.db.exctransformer.create_exception_handling(self.graph) - # apply the gc transformation - if self.db.gctransformer: - self.db.gctransformer.transform_graph(self.graph) - #self.graph.show() + self.collect_var_and_types() - for v in self.vars: T = v.concretetype # obscure: skip forward references and hope for the best diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -3,7 +3,7 @@ Void, OpaqueType, Float, RuntimeTypeInfo, getRuntimeTypeInfo, Char, _subarray) from rpython.rtyper.lltypesystem import llmemory, llgroup -from rpython.translator.c.funcgen import FunctionCodeGenerator +from rpython.translator.c.funcgen import make_funcgen from rpython.translator.c.support import USESLOTS # set to False if necessary while refactoring from rpython.translator.c.support import cdecl, forward_cdecl, somelettersfrom from rpython.translator.c.support import c_char_array_constant, barebonearray @@ -901,7 +901,7 @@ from rpython.translator.sandbox import rsandbox graph = rsandbox.get_external_function_sandbox_graph(fnobj, db, force_stub=True) - return FunctionCodeGenerator(graph, db) + return make_funcgen(graph, db) def sandbox_transform(fnobj, db): # for --sandbox: replace a function like os_open_llimpl() with @@ -909,7 +909,7 @@ # perform the operation. 
from rpython.translator.sandbox import rsandbox graph = rsandbox.get_external_function_sandbox_graph(fnobj, db) - return FunctionCodeGenerator(graph, db) + return make_funcgen(graph, db) def select_function_code_generators(fnobj, db, functionname): sandbox = db.need_sandboxing(fnobj) @@ -918,8 +918,7 @@ # apply the sandbox transformation return sandbox_transform(fnobj, db) exception_policy = getattr(fnobj, 'exception_policy', None) - return FunctionCodeGenerator( - fnobj.graph, db, exception_policy, functionname) + return make_funcgen(fnobj.graph, db, exception_policy, functionname) elif getattr(fnobj, 'external', None) is not None: if sandbox: return sandbox_stub(fnobj, db) From pypy.commits at gmail.com Sat Dec 19 03:06:25 2015 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Dec 2015 00:06:25 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: Uploaded bundles that contain the pypy repo Message-ID: <56751001.87de1c0a.ab36d.ffff877e@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r682:f3499836f7d1 Date: 2015-12-19 09:06 +0100 http://bitbucket.org/pypy/pypy.org/changeset/f3499836f7d1/ Log: Uploaded bundles that contain the pypy repo diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -242,6 +242,26 @@
     hg clone https://bitbucket.org/pypy/pypy
     
    +

    The above command may take a long time to run and if it aborts, it
    +is not resumable. You may prefer this way:

    +
    +hg clone -r null https://bitbucket.org/pypy/pypy
    +cd pypy
    +hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-01.bz2
    +hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-02.bz2
    +hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-03.bz2
    +hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-04.bz2
    +hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-05.bz2
    +hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-06.bz2
    +hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-07.bz2
    +hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-08.bz2
    +hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-09.bz2
    +hg pull
    +hg update
    +
    +

    If needed, you can also download the bz2 files by other means.
    +You can then replace the multiple unbundle commands above with
    +a single hg unbundle pypy-bundle-*.bz2.

  • Make sure you installed the dependencies. See the list here.

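[Editor's aside, not part of the changeset above: the clone-and-unbundle
recipe quoted above is easy to script. The sketch below automates the same
steps with Python's standard library; the repository and bundle URLs are the
ones given in the diff, while the function name, the bundle count and the use
of subprocess are assumptions made for illustration. It needs a working "hg"
on the PATH.]

    import subprocess

    REPO = "https://bitbucket.org/pypy/pypy"
    BUNDLE = "http://buildbot.pypy.org/bundle/pypy-bundle-%02d.bz2"

    def clone_with_bundles(target="pypy", nbundles=9):
        # make an empty clone first, then fill it from the pre-built bundles
        subprocess.check_call(["hg", "clone", "-r", "null", REPO, target])
        for i in range(1, nbundles + 1):
            subprocess.check_call(["hg", "unbundle", BUNDLE % i], cwd=target)
        # fetch whatever was committed after the last bundle, then check out
        subprocess.check_call(["hg", "pull"], cwd=target)
        subprocess.check_call(["hg", "update"], cwd=target)

    if __name__ == "__main__":
        clone_with_bundles()
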
  • diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -260,6 +260,27 @@ hg clone https://bitbucket.org/pypy/pypy + The above command may take a long time to run and if it aborts, it + is not resumable. You may prefer this way:: + + hg clone -r null https://bitbucket.org/pypy/pypy + cd pypy + hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-01.bz2 + hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-02.bz2 + hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-03.bz2 + hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-04.bz2 + hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-05.bz2 + hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-06.bz2 + hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-07.bz2 + hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-08.bz2 + hg unbundle http://buildbot.pypy.org/bundle/pypy-bundle-09.bz2 + hg pull + hg update + + If needed, you can also download the bz2 files by other means. + You can then replace the multiple ``unbundle`` commands above with + a single ``hg unbundle pypy-bundle-*.bz2``. + 2. Make sure you **installed the dependencies.** See the list here__. .. __: http://pypy.readthedocs.org/en/latest/build.html#install-build-time-dependencies From pypy.commits at gmail.com Sat Dec 19 07:59:15 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Sat, 19 Dec 2015 04:59:15 -0800 (PST) Subject: [pypy-commit] pypy fix-2211: Fix 2211 : cryptic exception message Message-ID: <567554a3.42ddc20a.f9258.4acd@mx.google.com> Author: Vincent Legoll Branch: fix-2211 Changeset: r81390:cdedb2bd7d06 Date: 2015-12-16 16:01 +0100 http://bitbucket.org/pypy/pypy/changeset/cdedb2bd7d06/ Log: Fix 2211 : cryptic exception message At translation time, when attempting to use extended slicing in rpython, Exception message was cryptic and not really helpful: AttributeError: type object 'newslice' has no attribute 'get_specialization' Now we have the following message, which points to the faulty line : [translation:ERROR] AnnotatorError: [translation:ERROR] [translation:ERROR] Cannot use extended slicing in rpython [translation:ERROR] [translation:ERROR] [translation:ERROR] v483 = newslice(v481, v482, step_0) [translation:ERROR] [translation:ERROR] In : [translation:ERROR] Happened at file /home/vlegoll/repo/upstream/pypy-for- upstream2/pypy/objspace/std/listobject.py line 1524 [translation:ERROR] [translation:ERROR] ==> items[start:start + slicelength:step] = other_items diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -508,6 +508,14 @@ *[annotator.annotation(arg) for arg in self.args]) +class NewSlice(HLOperation): + opname = 'newslice' + canraise = [AnnotatorError] + + def get_specialization(self, s_arg1, s_arg2, s_arg3): + raise AnnotatorError("Cannot use extended slicing in rpython") + + class Pow(PureOperation): opname = 'pow' arity = 3 From pypy.commits at gmail.com Sat Dec 19 07:59:13 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Sat, 19 Dec 2015 04:59:13 -0800 (PST) Subject: [pypy-commit] pypy fix-2211: new branch Message-ID: <567554a1.0b5e1c0a.187fb.ffffe073@mx.google.com> Author: Vincent Legoll Branch: fix-2211 Changeset: r81389:2da0d7446210 Date: 2015-12-16 15:56 +0100 http://bitbucket.org/pypy/pypy/changeset/2da0d7446210/ Log: new branch From pypy.commits at gmail.com Sat Dec 19 07:59:20 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: 
Sat, 19 Dec 2015 04:59:20 -0800 (PST) Subject: [pypy-commit] pypy fix-2211: Use NewSlice.consider() instead get_specialization() Message-ID: <567554a8.46b71c0a.dd0a6.ffffdf7f@mx.google.com> Author: Vincent Legoll Branch: fix-2211 Changeset: r81392:59c849746214 Date: 2015-12-16 19:11 +0100 http://bitbucket.org/pypy/pypy/changeset/59c849746214/ Log: Use NewSlice.consider() instead get_specialization() for extended slicing Exception message diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -512,7 +512,7 @@ opname = 'newslice' canraise = [AnnotatorError] - def get_specialization(self, s_arg1, s_arg2, s_arg3): + def consider(self, annotator): raise AnnotatorError("Cannot use extended slicing in rpython") From pypy.commits at gmail.com Sat Dec 19 07:59:21 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Sat, 19 Dec 2015 04:59:21 -0800 (PST) Subject: [pypy-commit] pypy fix-2211: Update what's new Message-ID: <567554a9.6408c20a.7346.ffffe62a@mx.google.com> Author: Vincent Legoll Branch: fix-2211 Changeset: r81393:2c48240f303c Date: 2015-12-16 19:27 +0100 http://bitbucket.org/pypy/pypy/changeset/2c48240f303c/ Log: Update what's new diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -79,3 +79,7 @@ Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. .. branch: flowspace-cleanups +.. branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing in rpython + From pypy.commits at gmail.com Sat Dec 19 07:59:18 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Sat, 19 Dec 2015 04:59:18 -0800 (PST) Subject: [pypy-commit] pypy fix-2211: Add a failing test for non cryptic extended slicing exception message Message-ID: <567554a6.8c921c0a.d729d.ffffde97@mx.google.com> Author: Vincent Legoll Branch: fix-2211 Changeset: r81391:1d79773109e7 Date: 2015-12-16 19:09 +0100 http://bitbucket.org/pypy/pypy/changeset/1d79773109e7/ Log: Add a failing test for non cryptic extended slicing exception message diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3516,6 +3516,19 @@ s = a.build_types(f, [unicode]) assert isinstance(s, annmodel.SomeUnicodeString) + def test_extended_slice(self): + def f(start, end, step): + return [1, 2, 3][start:end:step] + + a = self.RPythonAnnotator() + py.test.raises(AnnotatorError, "a.build_types(f, [int, int, int])") + a.build_types(f, [annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True)]) + def f(x): + return x[:-1] + + a.build_types(f, [str]) def test_negative_slice(self): def f(s, e): From pypy.commits at gmail.com Sat Dec 19 07:59:23 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Sat, 19 Dec 2015 04:59:23 -0800 (PST) Subject: [pypy-commit] pypy fix-2211: Test more cases Message-ID: <567554ab.0b831c0a.231fb.ffffe5eb@mx.google.com> Author: Vincent Legoll Branch: fix-2211 Changeset: r81394:b8db626fca5e Date: 2015-12-16 21:09 +0100 http://bitbucket.org/pypy/pypy/changeset/b8db626fca5e/ Log: Test more cases recreate the annotator between each test diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ 
b/rpython/annotator/test/test_annrpython.py @@ -3517,18 +3517,31 @@ assert isinstance(s, annmodel.SomeUnicodeString) def test_extended_slice(self): + a = self.RPythonAnnotator() def f(start, end, step): return [1, 2, 3][start:end:step] - - a = self.RPythonAnnotator() - py.test.raises(AnnotatorError, "a.build_types(f, [int, int, int])") - a.build_types(f, [annmodel.SomeInteger(nonneg=True), - annmodel.SomeInteger(nonneg=True), - annmodel.SomeInteger(nonneg=True)]) + with py.test.raises(AnnotatorError): + a.build_types(f, [int, int, int]) + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True)]) def f(x): - return x[:-1] - - a.build_types(f, [str]) + return x[::-1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[::2] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[1:2:1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) def test_negative_slice(self): def f(s, e): From pypy.commits at gmail.com Sat Dec 19 08:31:57 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Sat, 19 Dec 2015 05:31:57 -0800 (PST) Subject: [pypy-commit] pypy fix-2211: Remove useless canraise Message-ID: <56755c4d.2269c20a.7eae7.ffffe4d2@mx.google.com> Author: Vincent Legoll Branch: fix-2211 Changeset: r81395:b64515209948 Date: 2015-12-19 14:28 +0100 http://bitbucket.org/pypy/pypy/changeset/b64515209948/ Log: Remove useless canraise diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -510,7 +510,6 @@ class NewSlice(HLOperation): opname = 'newslice' - canraise = [AnnotatorError] def consider(self, annotator): raise AnnotatorError("Cannot use extended slicing in rpython") From pypy.commits at gmail.com Sat Dec 19 08:38:56 2015 From: pypy.commits at gmail.com (rlamy) Date: Sat, 19 Dec 2015 05:38:56 -0800 (PST) Subject: [pypy-commit] pypy fix-2211: Close branch before merging Message-ID: <56755df0.068e1c0a.12241.ffffe80c@mx.google.com> Author: Ronan Lamy Branch: fix-2211 Changeset: r81396:cb2c6a7d1418 Date: 2015-12-19 14:34 +0100 http://bitbucket.org/pypy/pypy/changeset/cb2c6a7d1418/ Log: Close branch before merging From pypy.commits at gmail.com Sat Dec 19 08:38:58 2015 From: pypy.commits at gmail.com (rlamy) Date: Sat, 19 Dec 2015 05:38:58 -0800 (PST) Subject: [pypy-commit] pypy default: Merge branch 'fix-2211' Message-ID: <56755df2.c3e31c0a.6cd38.ffffeb9f@mx.google.com> Author: Ronan Lamy Branch: Changeset: r81397:9859abb0ca1a Date: 2015-12-19 14:37 +0100 http://bitbucket.org/pypy/pypy/changeset/9859abb0ca1a/ Log: Merge branch 'fix-2211' diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -85,3 +85,7 @@ .. branch: test-AF_NETLINK .. branch: small-cleanups-misc .. branch: cpyext-slotdefs + +.. 
branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing in rpython diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3516,6 +3516,32 @@ s = a.build_types(f, [unicode]) assert isinstance(s, annmodel.SomeUnicodeString) + def test_extended_slice(self): + a = self.RPythonAnnotator() + def f(start, end, step): + return [1, 2, 3][start:end:step] + with py.test.raises(AnnotatorError): + a.build_types(f, [int, int, int]) + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True)]) + def f(x): + return x[::-1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[::2] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[1:2:1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) def test_negative_slice(self): def f(s, e): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -508,6 +508,13 @@ *[annotator.annotation(arg) for arg in self.args]) +class NewSlice(HLOperation): + opname = 'newslice' + + def consider(self, annotator): + raise AnnotatorError("Cannot use extended slicing in rpython") + + class Pow(PureOperation): opname = 'pow' arity = 3 From pypy.commits at gmail.com Sat Dec 19 14:22:23 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Sat, 19 Dec 2015 11:22:23 -0800 (PST) Subject: [pypy-commit] pypy fix-2198: Add new test cases for assigning slices Message-ID: <5675ae6f.cf111c0a.33d4a.5f4e@mx.google.com> Author: Vincent Legoll Branch: fix-2198 Changeset: r81398:f0c46bb74141 Date: 2015-12-19 10:18 +0100 http://bitbucket.org/pypy/pypy/changeset/f0c46bb74141/ Log: Add new test cases for assigning slices diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1003,6 +1003,8 @@ l = l0 = ['a', 'b'] l[1:1] = ['ae'] assert l == ['a', 'ae', 'b'] + l[1:1] = [] + assert l == ['a', 'ae', 'b'] l[1:100] = ['B'] assert l == ['a', 'B'] l[:] = [] @@ -1010,9 +1012,10 @@ assert l is l0 l = [] - l2 = range(3) - l.__setslice__(0,3,l2) - assert l == [0,1,2] + length = 3 + l2 = range(length) + l.__setslice__(0, length, l2) + assert l == range(length) def test_assign_extended_slice(self): l = l0 = ['a', 'b', 'c'] @@ -1043,13 +1046,10 @@ l[:] = l assert l == [1,2,3,4] - l = [1,2,3,4] + l = l0 = [1,2,3,4] l[0:2] = l assert l == [1,2,3,4,3,4] - - l = [1,2,3,4] - l[0:2] = l - assert l == [1,2,3,4,3,4] + assert l0 is l l = [1,2,3,4,5,6,7,8,9,10] raises(ValueError, "l[5::-1] = l") @@ -1061,6 +1061,11 @@ l[5:] = l assert l == [1,2,3,4,5,1,2,3,4,5,6,7,8,9,10] + l = l0 = [1,2,3,4,5,6] + raises(ValueError, "l[::-2] = l") + assert l == [1,2,3,4,5,6] + assert l is l0 + l = [1,2,3,4,5,6] l[::-1] = l assert l == [6,5,4,3,2,1] @@ -1337,6 +1342,10 @@ l.__setslice__(0, 2, [5, 6]) assert l == [5, 6, 3, 4] + l = [1,2,3,4] + l.__setslice__(-2, -1, [5, 6]) + assert l == [5, 6, 1, 2, 3,4] + l = [] l.__setslice__(0,0,[3,4,5]) assert l == [3,4,5] From pypy.commits at gmail.com Sat Dec 19 14:22:24 2015 
From: pypy.commits at gmail.com (Vincent Legoll) Date: Sat, 19 Dec 2015 11:22:24 -0800 (PST) Subject: [pypy-commit] pypy fix-2198: Add new test cases for assigning slices from self Message-ID: <5675ae70.8d071c0a.f4d45.5e71@mx.google.com> Author: Vincent Legoll Branch: fix-2198 Changeset: r81399:0899e821363b Date: 2015-12-19 10:25 +0100 http://bitbucket.org/pypy/pypy/changeset/0899e821363b/ Log: Add new test cases for assigning slices from self diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -396,6 +396,28 @@ def test_setslice(self): def dummyfn(): l = [10, 9, 8, 7] + l[:2] = l[2:4] + return l[0], l[1], l[2], l[3] + res = self.interpret(dummyfn, ()) + assert res.item0 == 8 + assert res.item1 == 7 + assert res.item2 == 8 + assert res.item3 == 7 + + def dummyfn(): + l = [10, 9, 8, 7] + l[1:3] = l + return l[0], l[1], l[2], l[3], l[4], l[5] + res = self.interpret(dummyfn, ()) + assert res.item0 == 10 + assert res.item1 == 10 + assert res.item2 == 9 + assert res.item3 == 8 + assert res.item4 == 7 + assert res.item5 == 7 + + def dummyfn(): + l = [10, 9, 8, 7] l[:2] = [6, 5] return l[0], l[1], l[2], l[3] res = self.interpret(dummyfn, ()) From pypy.commits at gmail.com Sun Dec 20 02:04:34 2015 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Dec 2015 23:04:34 -0800 (PST) Subject: [pypy-commit] pypy default: comment Message-ID: <56765302.85b61c0a.99509.ffffee2f@mx.google.com> Author: Armin Rigo Branch: Changeset: r81400:17d955c59451 Date: 2015-12-20 08:03 +0100 http://bitbucket.org/pypy/pypy/changeset/17d955c59451/ Log: comment diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -532,7 +532,9 @@ return # the ZERO_ARRAY operation will be optimized according to what # SETARRAYITEM_GC we see before the next allocation operation. - # See emit_pending_zeros(). + # See emit_pending_zeros(). (This optimization is done by + # hacking the object 'o' in-place: e.g., o.getarg(1) may be + # replaced with another constant greater than 0.) o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], descr=arraydescr) self.emit_op(o) From pypy.commits at gmail.com Sun Dec 20 02:55:57 2015 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Dec 2015 23:55:57 -0800 (PST) Subject: [pypy-commit] pypy default: theoretical fix for signed chars. It might be the case that Message-ID: <56765f0d.c4efc20a.c40ff.0016@mx.google.com> Author: Armin Rigo Branch: Changeset: r81401:9e73e592a9b0 Date: 2015-12-20 08:55 +0100 http://bitbucket.org/pypy/pypy/changeset/9e73e592a9b0/ Log: theoretical fix for signed chars. 
It might be the case that gc_store is never called with a negative size, so this doesn't change anything, but the abs() here is confusing diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1039,8 +1039,7 @@ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) size_box = op.getarg(3) assert isinstance(size_box, ConstInt) - size = size_box.value - itemsize = abs(size) + size = abs(size_box.value) if size == 1: need_lower_byte = True else: @@ -1049,7 +1048,7 @@ need_lower_byte=need_lower_byte) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) self.perform_discard(op, [base_loc, ofs_loc, value_loc, - imm(itemsize)]) + imm(size)]) def consider_gc_store_indexed(self, op): args = op.getarglist() @@ -1062,8 +1061,7 @@ assert isinstance(size_box, ConstInt) factor = scale_box.value offset = offset_box.value - size = size_box.value - itemsize = abs(size) + size = abs(size_box.value) if size == 1: need_lower_byte = True else: @@ -1072,7 +1070,7 @@ need_lower_byte=need_lower_byte) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) self.perform_discard(op, [base_loc, ofs_loc, value_loc, - imm(factor), imm(offset), imm(itemsize)]) + imm(factor), imm(offset), imm(size)]) def consider_increment_debug_counter(self, op): base_loc = self.loc(op.getarg(0)) From pypy.commits at gmail.com Sun Dec 20 03:34:54 2015 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Dec 2015 00:34:54 -0800 (PST) Subject: [pypy-commit] pypy default: Kill the last bits of ZERO_PTR_FIELD that are still there but never used Message-ID: <5676682e.4d8e1c0a.f6ed6.0b27@mx.google.com> Author: Armin Rigo Branch: Changeset: r81402:b697480a86fb Date: 2015-12-20 09:27 +0100 http://bitbucket.org/pypy/pypy/changeset/b697480a86fb/ Log: Kill the last bits of ZERO_PTR_FIELD that are still there but never used (this used to be an operation generated by rewrite.py: it is never produced in the front-end, so rewrite.py doesn't need to handle it as input) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -296,13 +296,6 @@ self.cpu.translate_support_code) self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2), itemsize, itemsize, basesize) - elif op.getopnum() == rop.ZERO_PTR_FIELD: - ofs = op.getarg(1).getint() - size = WORD - index_box = ConstInt(0) - value_box = ConstInt(0) - self.emit_gc_store_or_indexed(op, op.getarg(0), index_box, value_box, - size, 1, ofs) return False @@ -678,7 +671,7 @@ del self.last_zero_arrays[:] self._setarrayitems_occurred.clear() # - # Then write the ZERO_PTR_FIELDs that are still pending + # Then write the NULL-pointer-writing ops that are still pending for v, d in self._delayed_zero_setfields.iteritems(): v = self.get_box_replacement(v) for ofs in d.iterkeys(): diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4964,52 +4964,6 @@ [boxfloat(12.5)], 'int') assert res == struct.unpack("I", struct.pack("f", 12.5))[0] - def test_zero_ptr_field(self): - if not isinstance(self.cpu, AbstractLLCPU): - py.test.skip("llgraph can't do zero_ptr_field") - T = lltype.GcStruct('T') - S = lltype.GcStruct('S', ('x', lltype.Ptr(T))) - tdescr = self.cpu.sizeof(T) - sdescr = self.cpu.sizeof(S) - 
fielddescr = self.cpu.fielddescrof(S, 'x') - loop = parse(""" - [] - p0 = new(descr=tdescr) - p1 = new(descr=sdescr) - setfield_gc(p1, p0, descr=fielddescr) - zero_ptr_field(p1, %d) - finish(p1) - """ % fielddescr.offset, namespace=locals()) - looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - deadframe = self.cpu.execute_token(looptoken) - ref = self.cpu.get_ref_value(deadframe, 0) - s = lltype.cast_opaque_ptr(lltype.Ptr(S), ref) - assert not s.x - - def test_zero_ptr_field_2(self): - if not isinstance(self.cpu, AbstractLLCPU): - py.test.skip("llgraph does not do zero_ptr_field") - - from rpython.jit.backend.llsupport import symbolic - S = lltype.GcStruct('S', ('x', lltype.Signed), - ('p', llmemory.GCREF), - ('y', lltype.Signed)) - s = lltype.malloc(S) - s.x = -1296321 - s.y = -4398176 - s_ref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - s.p = s_ref - ofs_p, _ = symbolic.get_field_token(S, 'p', False) - # - self.execute_operation(rop.ZERO_PTR_FIELD, [ - InputArgRef(s_ref), ConstInt(ofs_p)], # OK for now to assume that the - 'void') # 2nd argument is a constant - # - assert s.x == -1296321 - assert s.p == lltype.nullptr(llmemory.GCREF.TO) - assert s.y == -4398176 - def test_zero_array(self): if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("llgraph does not do zero_array") diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -745,7 +745,6 @@ OPERATIONS.append(GetInteriorFieldOperation(rop.GETINTERIORFIELD_GC_I)) OPERATIONS.append(GetInteriorFieldOperation(rop.GETINTERIORFIELD_GC_I)) OPERATIONS.append(SetFieldOperation(rop.SETFIELD_GC)) - OPERATIONS.append(ZeroPtrFieldOperation(rop.ZERO_PTR_FIELD)) OPERATIONS.append(SetInteriorFieldOperation(rop.SETINTERIORFIELD_GC)) OPERATIONS.append(NewOperation(rop.NEW)) OPERATIONS.append(NewOperation(rop.NEW_WITH_VTABLE)) diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -57,19 +57,11 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None op = ResOperation(opnum, argboxes, descr) - if opnum != rop.ZERO_PTR_FIELD: - result = _execute_arglist(self.cpu, self.fakemetainterp, - opnum, argboxes, descr) - if result is not None: - c_result = wrap_constant(result) - op.copy_value_from(c_result) - else: - import ctypes - addr = self.cpu.cast_gcref_to_int(argboxes[0].getref_base()) - offset = argboxes[1].getint() - assert (offset % ctypes.sizeof(ctypes.c_long)) == 0 - ptr = ctypes.cast(addr, ctypes.POINTER(ctypes.c_long)) - ptr[offset / ctypes.sizeof(ctypes.c_long)] = 0 + result = _execute_arglist(self.cpu, self.fakemetainterp, + opnum, argboxes, descr) + if result is not None: + c_result = wrap_constant(result) + op.copy_value_from(c_result) self.loop.operations.append(op) return op diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -367,7 +367,6 @@ rop.INCREMENT_DEBUG_COUNTER, rop.COND_CALL_GC_WB, rop.COND_CALL_GC_WB_ARRAY, - rop.ZERO_PTR_FIELD, rop.ZERO_ARRAY, rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ 
b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -18,7 +18,6 @@ , (rop.SETINTERIORFIELD_RAW, 0, -1) , (rop.SETFIELD_GC, 0, -1) , (rop.SETFIELD_RAW, 0, -1) - , (rop.ZERO_PTR_FIELD, 0, -1) , (rop.ZERO_ARRAY, 0, -1) , (rop.STRSETITEM, 0, -1) , (rop.UNICODESETITEM, 0, -1) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1221,8 +1221,6 @@ 'SETINTERIORFIELD_GC/3d/n', 'SETINTERIORFIELD_RAW/3d/n', # right now, only used by tests 'SETFIELD_GC/2d/n', - 'ZERO_PTR_FIELD/2/n', # only emitted by the rewrite, clears a pointer field - # at a given constant offset, no descr 'ZERO_ARRAY/3d/n', # only emitted by the rewrite, clears (part of) an array # [arraygcptr, firstindex, length], descr=ArrayDescr 'SETFIELD_RAW/2d/n', From pypy.commits at gmail.com Sun Dec 20 03:59:27 2015 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Dec 2015 00:59:27 -0800 (PST) Subject: [pypy-commit] pypy default: Use INT_LSHIFT instead of INT_MUL when possible Message-ID: <56766def.e16ec20a.f0264.ffffd5b9@mx.google.com> Author: Armin Rigo Branch: Changeset: r81403:77683cabfc5b Date: 2015-12-20 09:57 +0100 http://bitbucket.org/pypy/pypy/changeset/77683cabfc5b/ Log: Use INT_LSHIFT instead of INT_MUL when possible diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,6 +1,6 @@ from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib.rarithmetic import ovfcheck +from rpython.rlib.rarithmetic import ovfcheck, highest_bit from rpython.rtyper.lltypesystem import llmemory, lltype, rstr from rpython.jit.metainterp import history from rpython.jit.metainterp.history import ConstInt, ConstPtr @@ -165,7 +165,14 @@ if isinstance(index_box, ConstInt): index_box = ConstInt(index_box.value * factor) else: - index_box = ResOperation(rop.INT_MUL, [index_box, ConstInt(factor)]) + # x & (x - 1) == 0 is a quick test for power of 2 + assert factor > 0 + if (factor & (factor - 1)) == 0: + index_box = ResOperation(rop.INT_LSHIFT, + [index_box, ConstInt(highest_bit(factor))]) + else: + index_box = ResOperation(rop.INT_MUL, + [index_box, ConstInt(factor)]) self.emit_op(index_box) factor = 1 # adjust the constant offset diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -37,6 +37,7 @@ return ','.join([str(n) for n in [descr.itemsize, descr.basesize, size]]) + WORD = globals()['WORD'] S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) sdescr = get_size_descr(self.gc_ll_descr, S) @@ -71,6 +72,12 @@ itzdescr = get_interiorfield_descr(self.gc_ll_descr, S1I, 'z') itydescr = get_interiorfield_descr(self.gc_ll_descr, S1I, 'y') itxdescr = get_interiorfield_descr(self.gc_ll_descr, S1I, 'x') + S2I = lltype.GcArray(('x', lltype.Ptr(S1)), + ('y', lltype.Ptr(S1)), + ('z', lltype.Ptr(S1)), + ('t', lltype.Ptr(S1))) # size is a power of two + s2i_item_size_in_bits = (4 if WORD == 4 else 5) + ity2descr = get_interiorfield_descr(self.gc_ll_descr, S2I, 'y') R1 = lltype.GcStruct('R', ('x', lltype.Signed), ('y', lltype.Float), ('z', lltype.Ptr(S1))) @@ -90,7 +97,6 @@ # tiddescr = self.gc_ll_descr.fielddescr_tid wbdescr = 
self.gc_ll_descr.write_barrier_descr - WORD = globals()['WORD'] # F = lltype.GcArray(lltype.Float) fdescr = get_array_descr(self.gc_ll_descr, F) @@ -1224,6 +1230,13 @@ '%(itydescr.arraydescr.basesize' ' + itydescr.fielddescr.offset)d,' '%(itydescr.fielddescr.field_size)d)'], + [True, (1,2,4,8), 'i3 = setinteriorfield_gc(p0,i1,i2,descr=ity2descr)' '->' + 'i4 = int_lshift(i1,' + '%(s2i_item_size_in_bits)d);' + 'i3 = gc_store_indexed(p0,i4,i2,1,' + '%(ity2descr.arraydescr.basesize' + ' + itydescr.fielddescr.offset)d,' + '%(ity2descr.fielddescr.field_size)d)'], ]) def test_gc_load_store_transform(self, support_offset, factors, fromto): self.cpu.load_constant_offset = support_offset From pypy.commits at gmail.com Sun Dec 20 04:28:08 2015 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Dec 2015 01:28:08 -0800 (PST) Subject: [pypy-commit] pypy default: untested: adapt the ARM backend to gc_load/gc_store Message-ID: <567674a8.25edc20a.49773.1897@mx.google.com> Author: Armin Rigo Branch: Changeset: r81404:bc29b98d120a Date: 2015-12-20 10:27 +0100 http://bitbucket.org/pypy/pypy/changeset/bc29b98d120a/ Log: untested: adapt the ARM backend to gc_load/gc_store diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -19,7 +19,6 @@ from rpython.jit.backend.arm.locations import imm, RawSPStackLocation from rpython.jit.backend.llsupport import symbolic from rpython.jit.backend.llsupport.gcmap import allocate_gcmap -from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.history import (AbstractFailDescr, ConstInt, @@ -655,31 +654,24 @@ pmc.B_offs(offset, c.EQ) return fcond - def emit_op_setfield_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs, size = arglocs - scale = get_scale(size.value) - self._write_to_mem(value_loc, base_loc, - ofs, imm(scale), fcond) + def emit_op_gc_store(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, ofs_loc, size_loc = arglocs + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, ofs_loc, imm(scale), fcond) return fcond - emit_op_setfield_raw = emit_op_setfield_gc - emit_op_zero_ptr_field = emit_op_setfield_gc - - def _genop_getfield(self, op, arglocs, regalloc, fcond): - base_loc, ofs, res, size = arglocs - signed = op.getdescr().is_field_signed() - scale = get_scale(size.value) - self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond) + def _emit_op_gc_load(self, op, arglocs, regalloc, fcond): + base_loc, ofs_loc, res_loc, nsize_loc = arglocs + nsize = nsize_loc.value + signed = (nsize < 0) + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, ofs_loc, imm(scale), + signed, fcond) return fcond - emit_op_getfield_gc_i = _genop_getfield - emit_op_getfield_gc_r = _genop_getfield - emit_op_getfield_gc_f = _genop_getfield - emit_op_getfield_gc_pure_i = _genop_getfield - emit_op_getfield_gc_pure_r = _genop_getfield - emit_op_getfield_gc_pure_f = _genop_getfield - emit_op_getfield_raw_i = _genop_getfield - emit_op_getfield_raw_f = _genop_getfield + emit_op_gc_load_i = _emit_op_gc_load + emit_op_gc_load_r = _emit_op_gc_load + emit_op_gc_load_f = _emit_op_gc_load def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): base_loc, value_loc = arglocs @@ -688,68 +680,21 @@ 
self.mc.STR_ri(value_loc.value, base_loc.value, 0, cond=fcond) return fcond - def _genop_getinteriorfield(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, res_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - descr = op.getdescr() - assert isinstance(descr, InteriorFieldDescr) - signed = descr.fielddescr.is_field_signed() - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - ofs_loc = tmploc - self._load_from_mem(res_loc, base_loc, ofs_loc, - imm(scale), signed, fcond) - return fcond - - emit_op_getinteriorfield_gc_i = _genop_getinteriorfield - emit_op_getinteriorfield_gc_r = _genop_getinteriorfield - emit_op_getinteriorfield_gc_f = _genop_getinteriorfield - - def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, value_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, index_loc, value_loc, ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - self._write_to_mem(value_loc, base_loc, tmploc, imm(scale), fcond) - return fcond - emit_op_setinteriorfield_raw = emit_op_setinteriorfield_gc - - def emit_op_arraylen_gc(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs = arglocs - self.load_reg(self.mc, res, base_loc, ofs.value) - return fcond - - def emit_op_setarrayitem_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - + def emit_op_gc_store_indexed(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, index_loc, imm(scale), fcond) return fcond def _write_to_mem(self, value_loc, base_loc, ofs_loc, scale, fcond=c.AL): + # Write a value of size '1 << scale' at the address + # 'base_ofs + ofs_loc'. Note that 'scale' is not used to scale + # the offset! 
if scale.value == 3: assert value_loc.is_vfp_reg() # vstr only supports imm offsets @@ -789,43 +734,31 @@ else: assert 0 - emit_op_setarrayitem_raw = emit_op_setarrayitem_gc - - def emit_op_raw_store(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + def _emit_op_gc_load_indexed(self, op, arglocs, regalloc, fcond): + res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() + nsize = nsize_loc.value + signed = (nsize < 0) + # add the base offset + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + # + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, index_loc, imm(scale), + signed, fcond) return fcond - def _genop_getarrayitem(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - signed = op.getdescr().is_item_signed() - - # scale the offset as required - # XXX we should try to encode the scale inside the "shift" part of LDR - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - # - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_getarrayitem_gc_i = _genop_getarrayitem - emit_op_getarrayitem_gc_r = _genop_getarrayitem - emit_op_getarrayitem_gc_f = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_i = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_r = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_f = _genop_getarrayitem - emit_op_getarrayitem_raw_i = _genop_getarrayitem - emit_op_getarrayitem_raw_f = _genop_getarrayitem + emit_op_gc_load_indexed_i = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_r = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_f = _emit_op_gc_load_indexed def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale, signed=False, fcond=c.AL): + # Load a value of '1 << scale' bytes, from the memory location + # 'base_loc + ofs_loc'. Note that 'scale' is not used to scale + # the offset! 
+ # if scale.value == 3: assert res_loc.is_vfp_reg() # vldr only supports imm offsets @@ -881,51 +814,6 @@ else: assert 0 - def _genop_raw_load(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - # no base offset - assert ofs.value == 0 - signed = op.getdescr().is_item_signed() - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_raw_load_i = _genop_raw_load - emit_op_raw_load_f = _genop_raw_load - - def emit_op_strlen(self, op, arglocs, regalloc, fcond): - l0, l1, res = arglocs - if l1.is_imm(): - self.mc.LDR_ri(res.value, l0.value, l1.getint(), cond=fcond) - else: - self.mc.LDR_rr(res.value, l0.value, l1.value, cond=fcond) - return fcond - - def emit_op_strgetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.LDRB_ri(res.value, r.ip.value, basesize.value, cond=fcond) - return fcond - - def emit_op_strsetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.STRB_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - return fcond - #from ../x86/regalloc.py:928 ff. def emit_op_copystrcontent(self, op, arglocs, regalloc, fcond): assert len(arglocs) == 0 @@ -1016,35 +904,6 @@ else: raise AssertionError("bad unicode item size") - emit_op_unicodelen = emit_op_strlen - - def emit_op_unicodegetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.LDR_ri(res.value, r.ip.value, basesize.value, cond=fcond) - elif scale.value == 1: - self.mc.LDRH_ri(res.value, r.ip.value, basesize.value, cond=fcond) - else: - assert 0, itemsize.value - return fcond - - def emit_op_unicodesetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.STR_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - elif scale.value == 1: - self.mc.STRH_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - else: - assert 0, itemsize.value - - return fcond - def store_force_descr(self, op, fail_locs, frame_depth): pos = self.mc.currpos() guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -34,9 +34,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.rlib.rarithmetic import r_uint from 
rpython.jit.backend.llsupport.descr import CallDescr @@ -802,15 +799,12 @@ src_locations2, dst_locations2, vfptmploc) return [] - def prepare_op_setfield_gc(self, op, fcond): + def prepare_op_gc_store(self, op, fcond): boxes = op.getarglist() - ofs, size, sign = unpack_fielddescr(op.getdescr()) - return self._prepare_op_setfield(boxes, ofs, size) - - def _prepare_op_setfield(self, boxes, ofs, size): - a0, a1 = boxes - base_loc = self.make_sure_var_in_reg(a0, boxes) - value_loc = self.make_sure_var_in_reg(a1, boxes) + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + ofs = boxes[1].getint() + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + size = abs(boxes[3].getint()) ofs_size = default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -819,19 +813,13 @@ self.assembler.load(ofs_loc, imm(ofs)) return [value_loc, base_loc, ofs_loc, imm(size)] - prepare_op_setfield_raw = prepare_op_setfield_gc - - def prepare_op_zero_ptr_field(self, op, fcond): + def _prepare_op_gc_load(self, op, fcond): a0 = op.getarg(0) ofs = op.getarg(1).getint() - return self._prepare_op_setfield([a0, ConstInt(0)], ofs, WORD) - - def _prepare_op_getfield(self, op, fcond): - a0 = op.getarg(0) - ofs, size, sign = unpack_fielddescr(op.getdescr()) + nsize = op.getarg(2).getint() # negative for "signed" base_loc = self.make_sure_var_in_reg(a0) immofs = imm(ofs) - ofs_size = default_imm_size if size < 8 else VMEM_imm_size + ofs_size = default_imm_size if abs(nsize) < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: @@ -839,17 +827,12 @@ self.assembler.load(ofs_loc, immofs) self.possibly_free_vars_for_op(op) self.free_temp_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size)] + res_loc = self.force_allocate_reg(op) + return [base_loc, ofs_loc, res_loc, imm(nsize)] - prepare_op_getfield_gc_i = _prepare_op_getfield - prepare_op_getfield_gc_r = _prepare_op_getfield - prepare_op_getfield_gc_f = _prepare_op_getfield - prepare_op_getfield_raw_i = _prepare_op_getfield - prepare_op_getfield_raw_f = _prepare_op_getfield - prepare_op_getfield_gc_pure_i = _prepare_op_getfield - prepare_op_getfield_gc_pure_r = _prepare_op_getfield - prepare_op_getfield_gc_pure_f = _prepare_op_getfield + prepare_op_gc_load_i = _prepare_op_gc_load + prepare_op_gc_load_r = _prepare_op_gc_load + prepare_op_gc_load_f = _prepare_op_gc_load def prepare_op_increment_debug_counter(self, op, fcond): boxes = op.getarglist() @@ -859,188 +842,38 @@ self.free_temp_vars() return [base_loc, value_loc] - def _prepare_op_getinteriorfield(self, op, fcond): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.make_sure_var_in_reg(op.getarg(1), args) - immofs = imm(ofs) - ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size - if check_imm_arg(ofs, size=ofs_size): - ofs_loc = immofs - else: - ofs_loc = self.get_scratch_reg(INT, args) - self.assembler.load(ofs_loc, immofs) + def prepare_op_gc_store_indexed(self, op, fcond): + boxes = op.getarglist() + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + index_loc = self.make_sure_var_in_reg(boxes[1], boxes) + assert boxes[3].getint() == 1 # scale + ofs = boxes[4].getint() + size = abs(boxes[5].getint()) + assert check_imm_arg(ofs) + return [value_loc, base_loc, index_loc, imm(size), imm(ofs)] + + 
def _prepare_op_gc_load_indexed(self, op, fcond): + boxes = op.getarglist() + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + index_loc = self.make_sure_var_in_reg(boxes[1], boxes) + assert boxes[2].getint() == 1 # scale + ofs = boxes[3].getint() + nsize = boxes[4].getint() + assert check_imm_arg(ofs) self.possibly_free_vars_for_op(op) self.free_temp_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] + res_loc = self.force_allocate_reg(op) + return [res_loc, base_loc, index_loc, imm(nsize), imm(ofs)] - prepare_op_getinteriorfield_gc_i = _prepare_op_getinteriorfield - prepare_op_getinteriorfield_gc_r = _prepare_op_getinteriorfield - prepare_op_getinteriorfield_gc_f = _prepare_op_getinteriorfield - - def prepare_op_setinteriorfield_gc(self, op, fcond): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.make_sure_var_in_reg(op.getarg(1), args) - value_loc = self.make_sure_var_in_reg(op.getarg(2), args) - immofs = imm(ofs) - ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size - if check_imm_arg(ofs, size=ofs_size): - ofs_loc = immofs - else: - ofs_loc = self.get_scratch_reg(INT, args) - self.assembler.load(ofs_loc, immofs) - return [base_loc, index_loc, value_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] - prepare_op_setinteriorfield_raw = prepare_op_setinteriorfield_gc - - def prepare_op_arraylen_gc(self, op, fcond): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - arg = op.getarg(0) - base_loc = self.make_sure_var_in_reg(arg) - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_op_setarrayitem_gc(self, op, fcond): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - scale = get_scale(size) - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(args[0], args) - value_loc = self.make_sure_var_in_reg(args[2], args) - ofs_loc = self.make_sure_var_in_reg(args[1], args) - assert check_imm_arg(ofs) - return [value_loc, base_loc, ofs_loc, imm(scale), imm(ofs)] - prepare_op_setarrayitem_raw = prepare_op_setarrayitem_gc - prepare_op_raw_store = prepare_op_setarrayitem_gc - - def _prepare_op_getarrayitem(self, op, fcond): - boxes = op.getarglist() - size, ofs, _ = unpack_arraydescr(op.getdescr()) - scale = get_scale(size) - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - assert check_imm_arg(ofs) - return [res, base_loc, ofs_loc, imm(scale), imm(ofs)] - - prepare_op_getarrayitem_gc_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_r = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_f = _prepare_op_getarrayitem - prepare_op_getarrayitem_raw_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_raw_f = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_r = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_f = _prepare_op_getarrayitem - prepare_op_raw_load_i = _prepare_op_getarrayitem - prepare_op_raw_load_f = _prepare_op_getarrayitem - - def prepare_op_strlen(self, op, fcond): - args = op.getarglist() - l0 = 
self.make_sure_var_in_reg(op.getarg(0)) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - immofs = imm(ofs_length) - if check_imm_arg(ofs_length): - l1 = immofs - else: - l1 = self.get_scratch_reg(INT, args) - self.assembler.load(l1, immofs) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - - res = self.force_allocate_reg(op) - self.possibly_free_var(op) - return [l0, l1, res] - - def prepare_op_strgetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0]) - - a1 = boxes[1] - imm_a1 = check_imm_box(a1) - if imm_a1: - ofs_loc = self.convert_to_imm(a1) - else: - ofs_loc = self.make_sure_var_in_reg(a1, boxes) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - assert itemsize == 1 - return [res, base_loc, ofs_loc, imm(basesize)] - - def prepare_op_strsetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - assert itemsize == 1 - return [value_loc, base_loc, ofs_loc, imm(basesize)] + prepare_op_gc_load_indexed_i = _prepare_op_gc_load_indexed + prepare_op_gc_load_indexed_r = _prepare_op_gc_load_indexed + prepare_op_gc_load_indexed_f = _prepare_op_gc_load_indexed prepare_op_copystrcontent = void prepare_op_copyunicodecontent = void prepare_op_zero_array = void - def prepare_op_unicodelen(self, op, fcond): - l0 = self.make_sure_var_in_reg(op.getarg(0)) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - immofs = imm(ofs_length) - if check_imm_arg(ofs_length): - l1 = immofs - else: - l1 = self.get_scratch_reg(INT, [op.getarg(0)]) - self.assembler.load(l1, immofs) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - return [l0, l1, res] - - def prepare_op_unicodegetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - scale = itemsize / 2 - return [res, base_loc, ofs_loc, - imm(scale), imm(basesize), imm(itemsize)] - - def prepare_op_unicodesetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - scale = itemsize / 2 - return [value_loc, base_loc, ofs_loc, - imm(scale), imm(basesize), imm(itemsize)] - def _prepare_op_same_as(self, op, fcond): arg = op.getarg(0) imm_arg = check_imm_box(arg) @@ -1142,8 +975,7 @@ def prepare_op_cond_call_gc_wb(self, op, fcond): # we force all arguments in a reg because it will be needed anyway by - # the following setfield_gc or setarrayitem_gc. It avoids loading it - # twice from the memory. + # the following gc_store. 
It avoids loading it twice from the memory. N = op.numargs() args = op.getarglist() arglocs = [self.make_sure_var_in_reg(op.getarg(i), args) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -29,6 +29,10 @@ float_regs = VFPRegisterManager.all_regs frame_reg = fp + # can an ISA instruction handle a factor to the offset? + # XXX should be: tuple(1 << i for i in range(31)) + load_supported_factors = (1,) + def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): AbstractLLCPU.__init__(self, rtyper, stats, opts, From pypy.commits at gmail.com Sun Dec 20 04:40:16 2015 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Dec 2015 01:40:16 -0800 (PST) Subject: [pypy-commit] pypy default: Merged in vincentlegoll/pypy/fix-missing-canraise (pull request #382) Message-ID: <56767780.6408c20a.7346.140f@mx.google.com> Author: Armin Rigo Branch: Changeset: r81406:8b08c8ce6257 Date: 2015-12-20 10:39 +0100 http://bitbucket.org/pypy/pypy/changeset/8b08c8ce6257/ Log: Merged in vincentlegoll/pypy/fix-missing-canraise (pull request #382) Looks like canraise needs to be there... diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -510,6 +510,7 @@ class NewSlice(HLOperation): opname = 'newslice' + canraise = [] def consider(self, annotator): raise AnnotatorError("Cannot use extended slicing in rpython") From pypy.commits at gmail.com Sun Dec 20 04:40:29 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Sun, 20 Dec 2015 01:40:29 -0800 (PST) Subject: [pypy-commit] pypy fix-missing-canraise: Looks like canraise needs to be there... Message-ID: <5676778d.2457c20a.d9372.162e@mx.google.com> Author: Vincent Legoll Branch: fix-missing-canraise Changeset: r81405:b1e7af4fc0c9 Date: 2015-12-20 02:53 +0100 http://bitbucket.org/pypy/pypy/changeset/b1e7af4fc0c9/ Log: Looks like canraise needs to be there... diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -510,6 +510,7 @@ class NewSlice(HLOperation): opname = 'newslice' + canraise = [] def consider(self, annotator): raise AnnotatorError("Cannot use extended slicing in rpython") From pypy.commits at gmail.com Sun Dec 20 04:42:11 2015 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Dec 2015 01:42:11 -0800 (PST) Subject: [pypy-commit] pypy default: add Message-ID: <567677f3.95151c0a.96895.096f@mx.google.com> Author: Armin Rigo Branch: Changeset: r81407:c693d1d616c3 Date: 2015-12-20 10:41 +0100 http://bitbucket.org/pypy/pypy/changeset/c693d1d616c3/ Log: add diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -85,6 +85,7 @@ .. branch: test-AF_NETLINK .. branch: small-cleanups-misc .. branch: cpyext-slotdefs +.. branch: fix-missing-canraise .. 
branch: fix-2211 From pypy.commits at gmail.com Sun Dec 20 04:50:35 2015 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Dec 2015 01:50:35 -0800 (PST) Subject: [pypy-commit] buildbot default: compat_i_ble, not compat_a_ble Message-ID: <567679eb.460c1c0a.fabcb.179c@mx.google.com> Author: Armin Rigo Branch: Changeset: r974:2a247b9421bd Date: 2015-12-20 10:50 +0100 http://bitbucket.org/pypy/buildbot/changeset/2a247b9421bd/ Log: compat_i_ble, not compat_a_ble diff --git a/master/templates/layout.html b/master/templates/layout.html --- a/master/templates/layout.html +++ b/master/templates/layout.html @@ -25,7 +25,7 @@ Home - Speed - - Numpy compatability + - NumPy compatibility - Summary (trunk) - Summary - Nightly builds From pypy.commits at gmail.com Sun Dec 20 07:09:33 2015 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Dec 2015 04:09:33 -0800 (PST) Subject: [pypy-commit] pypy default: translation fix in some cases Message-ID: <56769a7d.913bc20a.d29ab.3d6a@mx.google.com> Author: Armin Rigo Branch: Changeset: r81408:6830874c9e26 Date: 2015-12-20 13:08 +0100 http://bitbucket.org/pypy/pypy/changeset/6830874c9e26/ Log: translation fix in some cases diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -415,6 +415,7 @@ @replace_os_function('open') @specialize.argtype(0) + at enforceargs(None, int, int, typecheck=False) def open(path, flags, mode): if _prefer_unicode(path): fd = c_wopen(_as_unicode0(path), flags, mode) From pypy.commits at gmail.com Sun Dec 20 09:18:09 2015 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Dec 2015 06:18:09 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: FOSDEM abstract Message-ID: <5676b8a1.86bb1c0a.883b3.530b@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5575:6854a11aa352 Date: 2015-12-20 15:17 +0100 http://bitbucket.org/pypy/extradoc/changeset/6854a11aa352/ Log: FOSDEM abstract diff --git a/talk/fosdem2016/abstract.txt b/talk/fosdem2016/abstract.txt new file mode 100644 --- /dev/null +++ b/talk/fosdem2016/abstract.txt @@ -0,0 +1,44 @@ +Abstract +-------- + +In this talk, we will see an intro and status of two projects: PyPy, an +alternative Python-in-Python implementation; and CFFI, an alternative to +using the standard C API to extend Python. These two projects are very +different, but CFFI is a possible solution to a problem that hits +notably PyPy --- the CPython C API. + +The CPython C API was great and contributed to the present-day success +of Python, together with tools built on top of it like Cython and SWIG. +I will argue that it may be time to look beyond it. + + +Full description +---------------- + +We will see an intro and status of these two projects (the speaker is +involved in both): + + PyPy: http://pypy.org/ + + CFFI: http://cffi.readthedocs.org/ + +PyPy is an alternative Python implementation. It features a JIT +compiler that gives important speed-ups over CPython, for almost any +program that runs for any amount of time (at least some seconds). + +One of the main issues with PyPy is its forever-alpha-status "cpyext" +module. It is the part that loads and executes CPython extension +modules --- and occasionally segfaults if the stars are not correctly +aligned. The C API is very large, exposes the most obscure +implementation details, and assumes a memory model (reference counting) +that is often different in non-CPython implementations of Python. Thus +"cpyext" is the best-effort solution available for PyPy, but is a hack. 
+(IronPython has a similar capability.) + +This was partly the motivation for developing CFFI: it is a minimal +layer that allows direct access to C from Python, with no fixed +intermediate C API. It is available for CPython and for PyPy and could +be ported to more implementations. It shares ideas from Cython, ctypes, +and LuaJIT's ffi, but the non-dependence on any fixed C API is a central +point. Some high-visibility projects like Cryptography have switched +to it. From pypy.commits at gmail.com Sun Dec 20 11:48:11 2015 From: pypy.commits at gmail.com (rlamy) Date: Sun, 20 Dec 2015 08:48:11 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Inline rtyper.attachRuntimeTypeInfoFunc into its only caller (and delete dead test) Message-ID: <5676dbcb.c1bb1c0a.a55b1.ffff831d@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81409:6fdf80c493ae Date: 2015-12-20 17:47 +0100 http://bitbucket.org/pypy/pypy/changeset/6fdf80c493ae/ Log: Inline rtyper.attachRuntimeTypeInfoFunc into its only caller (and delete dead test) diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -13,8 +13,9 @@ from rpython.rtyper.lltypesystem.lltype import ( Ptr, Struct, GcStruct, malloc, cast_pointer, castable, nullptr, RuntimeTypeInfo, getRuntimeTypeInfo, typeOf, Void, FuncType, Bool, Signed, - functionptr) + functionptr, attachRuntimeTypeInfo) from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import rstr from rpython.rtyper.rmodel import ( Repr, getgcflavor, inputconst, warning, mangle) @@ -590,10 +591,17 @@ _callable=graph.func) else: destrptr = None - OBJECT = OBJECT_BY_FLAVOR[LLFLAVOR[self.gcflavor]] - self.rtyper.attachRuntimeTypeInfoFunc(self.object_type, - ll_runtime_type_info, - OBJECT, destrptr) + self.rtyper.call_all_setups() # compute ForwardReferences now + args_s = [SomePtr(Ptr(OBJECT))] + graph = self.rtyper.annotate_helper(ll_runtime_type_info, args_s) + s = self.rtyper.annotation(graph.getreturnvar()) + if (not isinstance(s, SomePtr) or + s.ll_ptrtype != Ptr(RuntimeTypeInfo)): + raise TyperError("runtime type info function returns %r, " + "expected Ptr(RuntimeTypeInfo)" % (s)) + funcptr = self.rtyper.getcallable(graph) + attachRuntimeTypeInfo(self.object_type, funcptr, destrptr) + vtable = self.rclass.getvtable() self.rtyper.set_type_for_typeptr(vtable, self.lowleveltype.TO) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -16,15 +16,15 @@ import py, math from rpython.annotator import model as annmodel, unaryop, binaryop -from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.flowspace.model import Variable, Constant, SpaceOperation from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy from rpython.rtyper.error import TyperError from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, - Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, - attachRuntimeTypeInfo, Primitive, getfunctionptr) -from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError + ContainerType, FuncType, functionptr, typeOf, + Primitive, getfunctionptr) +from rpython.rtyper.rmodel import Repr, inputconst from rpython.rtyper import rclass from 
rpython.rtyper.rclass import RootClassRepr from rpython.tool.pairtype import pair @@ -590,21 +590,6 @@ graph = self.annotate_helper(ll_function, argtypes) return self.getcallable(graph) - def attachRuntimeTypeInfoFunc(self, GCSTRUCT, func, ARG_GCSTRUCT=None, - destrptr=None): - self.call_all_setups() # compute ForwardReferences now - if ARG_GCSTRUCT is None: - ARG_GCSTRUCT = GCSTRUCT - args_s = [SomePtr(Ptr(ARG_GCSTRUCT))] - graph = self.annotate_helper(func, args_s) - s = self.annotation(graph.getreturnvar()) - if (not isinstance(s, SomePtr) or - s.ll_ptrtype != Ptr(RuntimeTypeInfo)): - raise TyperError("runtime type info function %r returns %r, " - "excepted Ptr(RuntimeTypeInfo)" % (func, s)) - funcptr = self.getcallable(graph) - attachRuntimeTypeInfo(GCSTRUCT, funcptr, destrptr) - # register operations from annotation model RPythonTyper._registeroperations(unaryop.UNARY_OPERATIONS, binaryop.BINARY_OPERATIONS) diff --git a/rpython/translator/c/test/test_refcount.py b/rpython/translator/c/test/test_refcount.py --- a/rpython/translator/c/test/test_refcount.py +++ b/rpython/translator/c/test/test_refcount.py @@ -106,37 +106,6 @@ assert fn(1) == 4 assert fn(0) == 5 - def test_del_basic(self): - py.test.skip("xxx fix or kill") - S = lltype.GcStruct('S', ('x', lltype.Signed), rtti=True) - TRASH = lltype.GcStruct('TRASH', ('x', lltype.Signed)) - GLOBAL = lltype.Struct('GLOBAL', ('x', lltype.Signed)) - glob = lltype.malloc(GLOBAL, immortal=True) - def destructor(s): - glob.x = s.x + 1 - def type_info_S(s): - return lltype.getRuntimeTypeInfo(S) - - def g(n): - s = lltype.malloc(S) - s.x = n - # now 's' should go away - def entrypoint(n): - g(n) - # llop.gc__collect(lltype.Void) - return glob.x - - t = TranslationContext() - t.buildannotator().build_types(entrypoint, [int]) - rtyper = t.buildrtyper() - destrptr = rtyper.annotate_helper_fn(destructor, [lltype.Ptr(S)]) - rtyper.attachRuntimeTypeInfoFunc(S, type_info_S, destrptr=destrptr) - rtyper.specialize() - fn = self.compile_func(entrypoint, None, t) - - res = fn(123) - assert res == 124 - def test_del_catches(self): import os def g(): From pypy.commits at gmail.com Mon Dec 21 04:07:53 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 21 Dec 2015 01:07:53 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5677c169.e251c20a.14fea.ffff85da@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r683:b036c602ac76 Date: 2015-12-21 10:07 +0100 http://bitbucket.org/pypy/pypy.org/changeset/b036c602ac76/ Log: update the values diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $30294 of $80000 (37.9%) + $30297 of $80000 (37.9%)
    @@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Mon Dec 21 05:13:25 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 21 Dec 2015 02:13:25 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: implemented release gil half way, lock release and reacquire solved (the former uses a serialization point to make the store visible to other cpus, Message-ID: <5677d0c5.ad3ec20a.c9307.ffffa30c@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81410:4d4c6bd91480 Date: 2015-12-21 11:12 +0100 http://bitbucket.org/pypy/pypy/changeset/4d4c6bd91480/ Log: implemented release gil half way, lock release and reacquire solved (the former uses a serialization point to make the store visible to other cpus, the latter uses compare and swap to set 1 to the lock) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -171,6 +171,9 @@ # save the information mc.STG(r.r14, l.addr(14*WORD, r.SP)) # save the link + RCS2 = r.r10 + RCS3 = r.r12 + LOCAL_VARS_OFFSET = 0 extra_stack_size = LOCAL_VARS_OFFSET + 4 * WORD + 8 extra_stack_size = (extra_stack_size + 15) & ~15 @@ -183,29 +186,24 @@ # need to save many registers: the registers that are anyway # destroyed by the call can be ignored (VOLATILES), and the # non-volatile registers won't be changed here. It only needs - # to save r.RCS1 (used below), r1 and f0 (possible results of - # the call), and two more non-volatile registers (used to store + # to save r2 and f0 (possible results of the call), + # and two more non-volatile registers (used to store # the RPython exception that occurred in the CALL, if any). # # We need to increase our stack frame size a bit to store them. # - self.mc.TRAP2() - #self.mc.LGR(r.SCRATCH, l.addr(0,r.SP)) # SP back chain - #self.mc.STG(r.SCRATCH, l.addr(-extra_stack_size, r.SP.value)) - #self.mc.STG(r.RCS1.value, r.SP.value, LOCAL_VARS_OFFSET + 0 * WORD) - #self.mc.STG(r.RCS2.value, r.SP.value, LOCAL_VARS_OFFSET + 1 * WORD) - #self.mc.STG(r.RCS3.value, r.SP.value, LOCAL_VARS_OFFSET + 2 * WORD) - #self.mc.STG(r.r2.value, r.SP.value, LOCAL_VARS_OFFSET + 3 * WORD) - #self.mc.STD(r.f1.value, r.SP.value, LOCAL_VARS_OFFSET + 4 * WORD) + self._push_all_regs_to_frame(mc, withfloats, callee_only=True) + mc.STMG(r.r10, r.r12, l.addr(10*WORD, r.SP)) + mc.STG(r.r2, l.addr(2*WORD, r.SP)) + mc.STD(r.f0, l.addr(3*WORD, r.SP)) # slot of r3 is not used here saved_regs = None saved_fp_regs = None - else: # push all volatile registers, push RCS1, and sometimes push RCS2 if withcards: - saved_regs = r.VOLATILES # + [r.RCS1, r.RCS2] + saved_regs = r.VOLATILES + [RCS2] else: - saved_regs = r.VOLATILES # + [r.RCS1] + saved_regs = r.VOLATILES if withfloats: saved_fp_regs = r.MANAGED_FP_REGS else: @@ -221,16 +219,10 @@ # of _reload_frame_if_necessary) # This trashes r0 and r2, which is fine in this case assert argument_loc is not r.r0 - xxx - #self._store_and_reset_exception(mc, r.RCS2, r.RCS3) + self._store_and_reset_exception(mc, RCS2, RCS3) if withcards: - xxx - #kmc.mr(r.RCS2.value, argument_loc.value) - # - # Save the lr into r.RCS1 - #mc.mflr(r.RCS1.value) - # + mc.LGR(RCS2, argument_loc) func = rffi.cast(lltype.Signed, func) # Note: if not 'for_frame', argument_loc is r0, which must carefully # not be overwritten above @@ -242,32 +234,25 @@ mc.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) if for_frame: - xxx - self._restore_exception(mc, r.RCS2, r.RCS3) + self._restore_exception(mc, RCS2, RCS3) if withcards: # 
A final andix before the blr, for the caller. Careful to # not follow this instruction with another one that changes # the status of cr0! card_marking_mask = descr.jit_wb_cards_set_singlebyte - mc.trap() - #mc.lbz(r.RCS2.value, r.RCS2.value, descr.jit_wb_if_flag_byteofs) - #mc.andix(r.RCS2.value, r.RCS2.value, card_marking_mask & 0xFF) + mc.LLGC(RCS2, l.addr(descr.jit_wb_if_flag_byteofs, RCS2)) + mc.NILL(RCS2, l.imm(card_marking_mask & 0xFF)) if for_frame: - self.mc.trap() - #self.mc.ld(r.RCS1.value, r.SP.value, LOCAL_VARS_OFFSET + 0 * WORD) - #self.mc.ld(r.RCS2.value, r.SP.value, LOCAL_VARS_OFFSET + 1 * WORD) - #self.mc.ld(r.RCS3.value, r.SP.value, LOCAL_VARS_OFFSET + 2 * WORD) - #self.mc.ld(r.r3.value, r.SP.value, LOCAL_VARS_OFFSET + 3 * WORD) - #self.mc.lfd(r.f1.value, r.SP.value, LOCAL_VARS_OFFSET + 4 * WORD) - #self.mc.addi(r.SP.value, r.SP.value, extra_stack_size) - + mc.LMG(r.r10, r.r12, l.addr(10*WORD, r.SP)) + mc.LG(r.r2, l.addr(2*WORD, r.SP)) + mc.LD(r.f0, l.addr(3*WORD, r.SP)) # slot of r3 is not used here else: self._pop_core_regs_from_jitframe(mc, saved_regs) self._pop_fp_regs_from_jitframe(mc, saved_fp_regs) - mc.LG(r.r14, l.addr(14*WORD, r.SP)) # restore the link + mc.LG(r.RETURN, l.addr(14*WORD, r.SP)) # restore the link mc.BCR(c.ANY, r.RETURN) self.mc = old_mc @@ -897,6 +882,37 @@ self.mc.LMG(r.r6, r.r15, l.addr(6*WORD, r.SP)) self.jmpto(r.r14) + def _push_all_regs_to_stack(self, mc, withfloats, callee_only=False): + base_ofs = 2*WORD + if callee_only: + regs = ZARCHRegisterManager.save_around_call_regs + else: + regs = r.registers[2:] + mc.STMG(regs[0], regs[1], l.addr(base_ofs, r.SP)) + if withfloats: + xxx + + def _push_all_regs_to_frame(self, mc, ignored_regs, withfloats, callee_only=False): + # Push all general purpose registers + base_ofs = self.cpu.get_baseofs_of_frame_field() + if callee_only: + regs = gpr_reg_mgr_cls.save_around_call_regs + else: + regs = gpr_reg_mgr_cls.all_regs + for gpr in regs: + if gpr not in ignored_regs: + v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + mc.MOV_br(v * WORD + base_ofs, gpr.value) + if withfloats: + if IS_X86_64: + coeff = 1 + else: + coeff = 2 + # Push all XMM regs + ofs = len(gpr_reg_mgr_cls.all_regs) + for i in range(len(xmm_reg_mgr_cls.all_regs)): + mc.MOVSD_bx((ofs + i * coeff) * WORD + base_ofs, i) + def _push_core_regs_to_jitframe(self, mc, includes=r.registers): if len(includes) == 0: return diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -14,8 +14,9 @@ GPR_ARGS = [r.r2, r.r3, r.r4, r.r5, r.r6] FPR_ARGS = [r.f0, r.f2, r.f4, r.f6] - #RFASTGILPTR = r.RCS2 - #RSHADOWOLD = r.RCS3 + RSHADOWOLD = r.r9 + RSHADOWPTR = r.r10 + RFASTGILPTR = r.r12 def __init__(self, assembler, fnloc, arglocs, resloc): AbstractCallBuilder.__init__(self, assembler, fnloc, arglocs, @@ -148,6 +149,12 @@ def call_releasegil_addr_and_move_real_arguments(self, fastgil): assert self.is_call_release_gil + RSHADOWOLD = self.RSHADOWOLD + RSHADOWPTR = self.RSHADOWPTR + RFASTGILPTR = self.RFASTGILPTR + # + # assumes RSHADOWOLD to be r9, stores all up to r15 + self.mc.STMG(RSHADOWOLD, r.r15, l.addr(9 * WORD, r.SP)) # # Save this thread's shadowstack pointer into r29, for later comparison gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap @@ -155,13 +162,13 @@ if gcrootmap.is_shadow_stack: rst = gcrootmap.get_root_stack_top_addr() self.mc.load_imm(RSHADOWPTR, rst) - self.mc.load(RSHADOWOLD.value, RSHADOWPTR.value, 0) + 
self.mc.LGR(RSHADOWOLD, RSHADOWPTR) # # change 'rpy_fastgil' to 0 (it should be non-zero right now) self.mc.load_imm(RFASTGILPTR, fastgil) - self.mc.li(r.r0.value, 0) - self.mc.lwsync() - self.mc.std(r.r0.value, RFASTGILPTR.value, 0) + self.mc.LGHI(r.SCRATCH, l.imm(0)) + self.mc.STG(r.SCRATCH, l.addr(0, RFASTGILPTR)) + self.mc.sync() # renders the store visible to other cpus # if not we_are_translated(): # for testing: we should not access self.mc.AGHI(r.SPP, l.imm(1)) # r31 any more @@ -169,21 +176,22 @@ def move_real_result_and_call_reacqgil_addr(self, fastgil): from rpython.jit.backend.zarch.codebuilder import InstrBuilder - xxx # try to reacquire the lock. The following registers are still # valid from before the call: - RSHADOWPTR = self.RSHADOWPTR # r30: &root_stack_top - RFASTGILPTR = self.RFASTGILPTR # r29: &fastgil - RSHADOWOLD = self.RSHADOWOLD # r28: previous val of root_stack_top + RSHADOWPTR = self.RSHADOWPTR # r9: &root_stack_top + RFASTGILPTR = self.RFASTGILPTR # r10: &fastgil + RSHADOWOLD = self.RSHADOWOLD # r12: previous val of root_stack_top - # Equivalent of 'r10 = __sync_lock_test_and_set(&rpy_fastgil, 1);' - self.mc.li(r.r9.value, 1) + # Equivalent of 'r14 = __sync_lock_test_and_set(&rpy_fastgil, 1);' + self.mc.LGHI(r.r11, l.imm(1)) + self.mc.LGHI(r.r14, l.imm(0)) retry_label = self.mc.currpos() - self.mc.ldarx(r.r10.value, 0, RFASTGILPTR.value) # load the lock value - self.mc.stdcxx(r.r9.value, 0, RFASTGILPTR.value) # try to claim lock - self.mc.bc(6, 2, retry_label - self.mc.currpos()) # retry if failed - self.mc.isync() + # compare and swap, only succeeds if the the contents of the + # lock is equal to r14 (= 0) + self.mc.CSG(r.r14, r.r11, l.addr(RFASTGILPTR)) # try to claim lock + self.mc.BRC(c.EQ, l.imm(retry_label - self.mc.currpos())) # retry if failed + #self.mc.sync() self.mc.cmpdi(0, r.r10.value, 0) b1_location = self.mc.currpos() @@ -244,7 +252,6 @@ def write_real_errno(self, save_err): - xxx if save_err & rffi.RFFI_READSAVED_ERRNO: # Just before a call, read '*_errno' and write it into the # real 'errno'. A lot of registers are free here, notably @@ -254,19 +261,19 @@ else: rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - self.mc.ld(r.r11.value, r.SP.value, - THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp) - self.mc.lwz(r.r0.value, r.r11.value, rpy_errno) - self.mc.ld(r.r11.value, r.r11.value, p_errno) - self.mc.stw(r.r0.value, r.r11.value, 0) + self.mc.LG(r.r11, + l.addr(THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp, r.SP)) + self.mc.LGH(r.SCRATCH2, l.addr(rpy_errno, r.r11)) + self.mc.LG(r.r11, l.addr(p_errno, r.r11)) + self.mc.STHY(r.SCRATCH2, l.addr(0,r.r11)) elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: # Same, but write zero. 
p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - self.mc.ld(r.r11.value, r.SP.value, - THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp) - self.mc.ld(r.r11.value, r.r11.value, p_errno) - self.mc.li(r.r0.value, 0) - self.mc.stw(r.r0.value, r.r11.value, 0) + self.mc.LG(r.r11, + l.addr(THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp, r.SP)) + self.mc.LG(r.r11, l.addr(p_errno, r.r11)) + self.mc.LGHI(r.SCRATCH, 0) + self.mc.STHY(r.SCRATCH, l.addr(0,r.r11)) def read_real_errno(self, save_err): if save_err & rffi.RFFI_SAVE_ERRNO: diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -174,6 +174,10 @@ self.LGFI(dest_reg, l.imm(word & 0xFFFFffff)) self.IIHF(dest_reg, l.imm((word >> 32) & 0xFFFFffff)) + def sync(self): + # see sync. section of the zarch manual! + self.BCR_rr(0xf,0) + def raw_call(self, call_reg=r.RETURN): """Emit a call to the address stored in the register 'call_reg', which must be either RAW_CALL_REG or r12. This is a regular C diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -41,7 +41,9 @@ # rotating # rotate, then insert selected bits - 'RISBGN': ('rie_f', ['\xEC','\x59']), + # on the VM the miscellaneous-instruction-extensions + # does not seem to be installed, sad but true... + # 'RISBGN': ('rie_f', ['\xEC','\x59']), # invert & negative & absolute 'LPGR': ('rre', ['\xB9','\x00']), @@ -107,6 +109,9 @@ 'XI': ('si', ['\x97']), 'XIY': ('siy', ['\xEB','\x57']), + 'XILF': ('ril', ['\xC0','\x06']), + 'XIHF': ('ril', ['\xC0','\x07']), + # OR immediate 'OIHH': ('ri_u', ['\xA5', '\x08']), 'OIHL': ('ri_u', ['\xA5', '\x09']), diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -230,8 +230,8 @@ if is_call_release_gil: saveerrloc = arglocs[1] - assert saveerrloc.is_in_pool() - cb.emit_call_release_gil(saveerrloc) + assert saveerrloc.is_imm() + cb.emit_call_release_gil(saveerrloc.value) else: cb.emit() @@ -490,12 +490,15 @@ # compute in r2 the index of the bit inside the byte: # (index >> card_page_shift) & 7 # 0x80 sets zero flag. 
will store 0 into all selected bits - mc.RISBGN(r.SCRATCH2, loc_index, l.imm(3), l.imm(0x80 | 63), l.imm(61)) + # cannot be used on the VM + # mc.RISBGN(r.SCRATCH, loc_index, l.imm(3), l.imm(0x80 | 63), l.imm(61)) + mc.SLAG(r.SCRATCH, loc_index, l.addr(3)) + mc.NILL(r.SCRATCH, l.imm(0xff)) #mc.rldicl(r.SCRATCH2.value, loc_index.value, 64 - n, 61) # set r2 to 1 << r2 - mc.LGHI(r.SCRATCH, l.imm(1)) - mc.SLAG(r.SCRATCH2, r.SCRATCH, l.addr(0,r.SCRATCH2)) + mc.LGHI(r.SCRATCH2, l.imm(1)) + mc.SLAG(r.SCRATCH, r.SCRATCH2, l.addr(0,r.SCRATCH)) # set this bit inside the byte of interest addr = l.addr(0, loc_base, tmp_loc) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -65,6 +65,12 @@ rop.GC_LOAD_INDEXED_R, rop.GC_LOAD_INDEXED_I,): return + elif op.is_call_release_gil(): + for arg in op.getarglist()[1:]: + if arg.is_constant(): + self.offset_map[arg] = self.size + self.reserve_literal(8) + return for arg in op.getarglist(): if arg.is_constant(): self.offset_map[arg] = self.size diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -859,14 +859,27 @@ prepare_call_may_force_f = _prepare_call_may_force prepare_call_may_force_n = _prepare_call_may_force + def _prepare_call_release_gil(self, op): + save_all_regs = False + errno_box = op.getarg(0) + assert isinstance(errno_box, ConstInt) + args = [None, l.imm(errno_box.value)] + for i in range(1,op.numargs()): + args.append(self.loc(op.getarg(i))) + self._spill_before_call(save_all_regs) + if op.type != VOID: + resloc = self.after_call(op) + args[0] = resloc + return args + + prepare_call_release_gil_i = _prepare_call_release_gil + prepare_call_release_gil_f = _prepare_call_release_gil + prepare_call_release_gil_n = _prepare_call_release_gil + def prepare_force_token(self, op): res_loc = self.force_allocate_reg(op) return [res_loc] - prepare_call_release_gil_i = _prepare_call_may_force - prepare_call_release_gil_f = _prepare_call_may_force - prepare_call_release_gil_n = _prepare_call_may_force - def _prepare_call_assembler(self, op): locs = self.locs_for_call_assembler(op) self._spill_before_call(save_all_regs=True) From pypy.commits at gmail.com Mon Dec 21 06:56:57 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 21 Dec 2015 03:56:57 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: first call to c (with release gil) passed, wrote 3 registers below the stack pointer. ppc has many registers, but zarch does not. Message-ID: <5677e909.e935c20a.161dc.ffffbebc@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81411:fd4fa9b6a5d1 Date: 2015-12-21 12:56 +0100 http://bitbucket.org/pypy/pypy/changeset/fd4fa9b6a5d1/ Log: first call to c (with release gil) passed, wrote 3 registers below the stack pointer. ppc has many registers, but zarch does not. thus I have chosen a middle ground between x86 and ppc. on the s390x 3 registers (that are very convenient for the call_release_gil instr) are saved before the instruction below the stack pointer. the following allocated stack is thus 3 words bigger diff --git a/rpython/jit/backend/zarch/arch.py b/rpython/jit/backend/zarch/arch.py --- a/rpython/jit/backend/zarch/arch.py +++ b/rpython/jit/backend/zarch/arch.py @@ -10,6 +10,7 @@ # +------------------------------+ | # | .... 
| | # | spill and local variables | | +# | used by call release gil | | # | .... | | # +------------------------------+ | # | .... | | diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -3,6 +3,7 @@ STD_FRAME_SIZE_IN_BYTES) import rpython.jit.backend.zarch.locations as l import rpython.jit.backend.zarch.registers as r +import rpython.jit.backend.zarch.conditions as c from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.llsupport.callbuilder import AbstractCallBuilder from rpython.jit.backend.llsupport.jump import remap_frame_layout @@ -14,9 +15,9 @@ GPR_ARGS = [r.r2, r.r3, r.r4, r.r5, r.r6] FPR_ARGS = [r.f0, r.f2, r.f4, r.f6] - RSHADOWOLD = r.r9 - RSHADOWPTR = r.r10 - RFASTGILPTR = r.r12 + RSHADOWOLD = r.r8 + RSHADOWPTR = r.r9 + RFASTGILPTR = r.r10 def __init__(self, assembler, fnloc, arglocs, resloc): AbstractCallBuilder.__init__(self, assembler, fnloc, arglocs, @@ -122,7 +123,6 @@ if gcrootmap.is_shadow_stack and self.is_call_release_gil: # in this mode, RSHADOWOLD happens to contain the shadowstack # top at this point, so reuse it instead of loading it again - xxx ssreg = self.RSHADOWOLD self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) @@ -153,8 +153,8 @@ RSHADOWPTR = self.RSHADOWPTR RFASTGILPTR = self.RFASTGILPTR # - # assumes RSHADOWOLD to be r9, stores all up to r15 - self.mc.STMG(RSHADOWOLD, r.r15, l.addr(9 * WORD, r.SP)) + self.mc.STMG(RSHADOWOLD, self.RFASTGILPTR, l.addr(-3*WORD, r.SP)) + self.subtracted_to_sp += 4*WORD # # Save this thread's shadowstack pointer into r29, for later comparison gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap @@ -169,13 +169,10 @@ self.mc.LGHI(r.SCRATCH, l.imm(0)) self.mc.STG(r.SCRATCH, l.addr(0, RFASTGILPTR)) self.mc.sync() # renders the store visible to other cpus - # - if not we_are_translated(): # for testing: we should not access - self.mc.AGHI(r.SPP, l.imm(1)) # r31 any more def move_real_result_and_call_reacqgil_addr(self, fastgil): - from rpython.jit.backend.zarch.codebuilder import InstrBuilder + from rpython.jit.backend.zarch.codebuilder import OverwritingBuilder # try to reacquire the lock. The following registers are still # valid from before the call: @@ -184,19 +181,19 @@ RSHADOWOLD = self.RSHADOWOLD # r12: previous val of root_stack_top # Equivalent of 'r14 = __sync_lock_test_and_set(&rpy_fastgil, 1);' - self.mc.LGHI(r.r11, l.imm(1)) - self.mc.LGHI(r.r14, l.imm(0)) + self.mc.LGHI(r.SCRATCH, l.imm(1)) retry_label = self.mc.currpos() # compare and swap, only succeeds if the the contents of the # lock is equal to r14 (= 0) - self.mc.CSG(r.r14, r.r11, l.addr(RFASTGILPTR)) # try to claim lock - self.mc.BRC(c.EQ, l.imm(retry_label - self.mc.currpos())) # retry if failed - #self.mc.sync() + self.mc.LG(r.r14, l.addr(0, RFASTGILPTR)) + self.mc.CSG(r.r14, r.SCRATCH, l.addr(0, RFASTGILPTR)) # try to claim lock + self.mc.BRC(c.NE, l.imm(retry_label - self.mc.currpos())) # retry if failed + self.mc.sync() - self.mc.cmpdi(0, r.r10.value, 0) + self.mc.CGHI(r.r14, l.imm0) b1_location = self.mc.currpos() - self.mc.trap() # boehm: patched with a BEQ: jump if r10 is zero - # shadowstack: patched with BNE instead + self.mc.trap() # boehm: patched with a BEQ: jump if r14 is zero + self.mc.write('\x00'*4) # shadowstack: patched with BNE instead if self.asm.cpu.gc_ll_descr.gcrootmap: # When doing a call_release_gil with shadowstack, there @@ -205,50 +202,50 @@ # thread. 
So here we check if the shadowstack pointer # is still the same as before we released the GIL (saved # in RSHADOWOLD), and if not, we fall back to 'reacqgil_addr'. - self.mc.load(r.r9.value, RSHADOWPTR.value, 0) - self.mc.cmpdi(0, r.r9.value, RSHADOWOLD.value) + self.mc.CGR(RSHADOWPTR, RSHADOWOLD) bne_location = b1_location b1_location = self.mc.currpos() self.mc.trap() + self.mc.write('\x00'*4) # revert the rpy_fastgil acquired above, so that the # general 'reacqgil_addr' below can acquire it again... - # (here, r10 is conveniently zero) - self.mc.std(r.r10.value, RFASTGILPTR.value, 0) + # (here, r14 is conveniently zero) + self.mc.STG(r.r14, l.addr(0,RFASTGILPTR)) - pmc = InstrBuilder(self.mc, bne_location, 1) - xxx - pmc.BCR(l.imm(0xf), self.mc.currpos() - bne_location) + pmc = OverwritingBuilder(self.mc, bne_location, 1) + pmc.BCRL(c.NE, self.mc.currpos() - bne_location) pmc.overwrite() # # Yes, we need to call the reacqgil() function. # save the result we just got RSAVEDRES = RFASTGILPTR # can reuse this reg here reg = self.resloc - xxx PARAM_SAVE_AREA_OFFSET = 0 if reg is not None: if reg.is_core_reg(): - self.mc.mr(RSAVEDRES.value, reg.value) + self.mc.LGR(RSAVEDRES, reg) elif reg.is_fp_reg(): + xxx self.mc.stfd(reg.value, r.SP.value, PARAM_SAVE_AREA_OFFSET + 7 * WORD) self.mc.load_imm(self.mc.RAW_CALL_REG, self.asm.reacqgil_addr) self.mc.raw_call() if reg is not None: if reg.is_core_reg(): - self.mc.mr(reg.value, RSAVEDRES.value) + self.mc.LGR(reg, RSAVEDRES) elif reg.is_fp_reg(): + xxx self.mc.lfd(reg.value, r.SP.value, PARAM_SAVE_AREA_OFFSET + 7 * WORD) # replace b1_location with BEQ(here) pmc = OverwritingBuilder(self.mc, b1_location, 1) - pmc.beq(self.mc.currpos() - b1_location) + pmc.BRCL(c.EQ, l.imm(self.mc.currpos() - b1_location)) pmc.overwrite() - if not we_are_translated(): # for testing: now we can access - self.mc.addi(r.SPP.value, r.SPP.value, -1) # r31 again + # restore the values that might have been overwritten + self.mc.LMG(RSHADOWOLD, RFASTGILPTR, l.addr(-3*WORD, r.SP)) def write_real_errno(self, save_err): @@ -261,16 +258,14 @@ else: rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - self.mc.LG(r.r11, - l.addr(THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp, r.SP)) + self.mc.LG(r.r11, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) self.mc.LGH(r.SCRATCH2, l.addr(rpy_errno, r.r11)) self.mc.LG(r.r11, l.addr(p_errno, r.r11)) self.mc.STHY(r.SCRATCH2, l.addr(0,r.r11)) elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: # Same, but write zero. p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - self.mc.LG(r.r11, - l.addr(THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp, r.SP)) + self.mc.LG(r.r11, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) self.mc.LG(r.r11, l.addr(p_errno, r.r11)) self.mc.LGHI(r.SCRATCH, 0) self.mc.STHY(r.SCRATCH, l.addr(0,r.r11)) diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -42,7 +42,9 @@ # rotating # rotate, then insert selected bits # on the VM the miscellaneous-instruction-extensions - # does not seem to be installed, sad but true... 
+ # does not seem to be installed + # cpu fails at this instruction, and gnu assembler + # does not recognize mnemonic # 'RISBGN': ('rie_f', ['\xEC','\x59']), # invert & negative & absolute @@ -72,6 +74,9 @@ 'CGIB': ('ris', ['\xEC','\xFC']), 'CGRJ': ('rie_b', ['\xEC','\x64']), 'CLGRJ': ('rie_b', ['\xEC','\x65']), + # compare and swap + 'CSG': ('rsy_a', ['\xEB','\x30']), + } logic_mnemonic_codes = { @@ -232,7 +237,7 @@ 'DIEBR': ('rrf_b', ['\xB3','\x53'], 'r,r,r,m'), 'DIDBR': ('rrf_b', ['\xB3','\x5B'], 'r,r,r,m'), - # COMPARISON + # compare 'CEBR': ('rre', ['\xB3','\x09']), 'CDBR': ('rre', ['\xB3','\x19']), 'CEB': ('rxe', ['\xED','\x09'], 'r,bidl,-'), From pypy.commits at gmail.com Mon Dec 21 12:29:25 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 21 Dec 2015 09:29:25 -0800 (PST) Subject: [pypy-commit] pypy default: Mention the WIP about embedding and cffi. Message-ID: <567836f5.0b831c0a.231fb.54ed@mx.google.com> Author: Armin Rigo Branch: Changeset: r81412:6293bab67e95 Date: 2015-12-21 18:27 +0100 http://bitbucket.org/pypy/pypy/changeset/6293bab67e95/ Log: Mention the WIP about embedding and cffi. diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -130,8 +130,13 @@ More complete example --------------------- -.. note:: This example depends on pypy_execute_source_ptr which is not available - in PyPy <= 2.2.1. +.. note:: Note that we do not make use of ``extern "Python"``, the new + way to do callbacks in CFFI 1.4: this is because these examples use + the ABI mode, not the API mode, and with the ABI mode you still have + to use ``ffi.callback()``. It is work in progress to integrate + ``extern "Python"`` with the idea of embedding (and it is expected + to ultimately lead to a better way to do embedding than the one + described here, and that would work equally well on CPython and PyPy). Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. 
From pypy.commits at gmail.com Mon Dec 21 13:10:09 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 21 Dec 2015 10:10:09 -0800 (PST) Subject: [pypy-commit] cffi default: bump to 1.4.2 (reason in issue #239) Message-ID: <56784081.8673c20a.386b4.6241@mx.google.com> Author: Armin Rigo Branch: Changeset: r2491:b65bd0902b74 Date: 2015-12-21 19:03 +0100 http://bitbucket.org/cffi/cffi/changeset/b65bd0902b74/ Log: bump to 1.4.2 (reason in issue #239) diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -6498,7 +6498,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("1.4.1"); + v = PyText_FromString("1.4.2"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.1" -__version_info__ = (1, 4, 1) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '1.4' # The full version, including alpha/beta/rc tags. -release = '1.4.1' +release = '1.4.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,11 +51,11 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.1.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.2.tar.gz - - MD5: 73c2047f598ac7d8b7a5cd8e6d835c42 + - MD5: ... - - SHA: 0a00384281bca841380766b0b41087d105e428a5 + - SHA: ... * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,6 +3,12 @@ ====================== +v1.4.2 +====== + +Nothing changed from v1.4.1. 
+ + v1.4.1 ====== diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ `Mailing list `_ """, - version='1.4.1', + version='1.4.2', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h']} if cpython else {}, From pypy.commits at gmail.com Mon Dec 21 13:10:11 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 21 Dec 2015 10:10:11 -0800 (PST) Subject: [pypy-commit] cffi release-1.4: hg merge default Message-ID: <56784083.a6ebc20a.314e0.5cae@mx.google.com> Author: Armin Rigo Branch: release-1.4 Changeset: r2492:2a74753abaa9 Date: 2015-12-21 19:04 +0100 http://bitbucket.org/cffi/cffi/changeset/2a74753abaa9/ Log: hg merge default diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -6498,7 +6498,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("1.4.1"); + v = PyText_FromString("1.4.2"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.1" -__version_info__ = (1, 4, 1) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '1.4' # The full version, including alpha/beta/rc tags. -release = '1.4.1' +release = '1.4.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,11 +51,11 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.1.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.2.tar.gz - - MD5: 73c2047f598ac7d8b7a5cd8e6d835c42 + - MD5: ... - - SHA: 0a00384281bca841380766b0b41087d105e428a5 + - SHA: ... * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,6 +3,12 @@ ====================== +v1.4.2 +====== + +Nothing changed from v1.4.1. 
+ + v1.4.1 ====== diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ `Mailing list `_ """, - version='1.4.1', + version='1.4.2', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h']} if cpython else {}, From pypy.commits at gmail.com Mon Dec 21 13:12:32 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 21 Dec 2015 10:12:32 -0800 (PST) Subject: [pypy-commit] cffi release-1.4: md5/sha1 Message-ID: <56784110.e16ec20a.f0264.31f3@mx.google.com> Author: Armin Rigo Branch: release-1.4 Changeset: r2493:52d6cb07c0e7 Date: 2015-12-21 19:11 +0100 http://bitbucket.org/cffi/cffi/changeset/52d6cb07c0e7/ Log: md5/sha1 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.4.2.tar.gz - - MD5: ... + - MD5: 81357fe5042d00650b85b728cc181df2 - - SHA: ... + - SHA: 76cff6f1ff5bfb2b9c6c8e2cfa8bf90b5c944394 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Mon Dec 21 13:12:34 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 21 Dec 2015 10:12:34 -0800 (PST) Subject: [pypy-commit] cffi default: hg merge release-1.4 Message-ID: <56784112.a658c20a.49ee6.669e@mx.google.com> Author: Armin Rigo Branch: Changeset: r2494:f65126989772 Date: 2015-12-21 19:12 +0100 http://bitbucket.org/cffi/cffi/changeset/f65126989772/ Log: hg merge release-1.4 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.4.2.tar.gz - - MD5: ... + - MD5: 81357fe5042d00650b85b728cc181df2 - - SHA: ... 
+ - SHA: 76cff6f1ff5bfb2b9c6c8e2cfa8bf90b5c944394 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From pypy.commits at gmail.com Mon Dec 21 13:13:58 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 21 Dec 2015 10:13:58 -0800 (PST) Subject: [pypy-commit] pypy default: update to cffi 1.4.2 Message-ID: <56784166.55b21c0a.435b3.5f1a@mx.google.com> Author: Armin Rigo Branch: Changeset: r81413:d96bd17c3018 Date: 2015-12-21 19:07 +0100 http://bitbucket.org/pypy/pypy/changeset/d96bd17c3018/ Log: update to cffi 1.4.2 diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload, clibffi -VERSION = "1.4.1" +VERSION = "1.4.2" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): From pypy.commits at gmail.com Mon Dec 21 16:19:54 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 21 Dec 2015 13:19:54 -0800 (PST) Subject: [pypy-commit] cffi default: fix Message-ID: <56786cfa.c4b1c20a.b3ce9.ffffaf1e@mx.google.com> Author: Armin Rigo Branch: Changeset: r2495:8f48bf33a51e Date: 2015-12-21 22:19 +0100 http://bitbucket.org/cffi/cffi/changeset/8f48bf33a51e/ Log: fix diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -823,7 +823,7 @@ hurt) to say ``WINAPI`` or ``__stdcall`` when declaring a plain function in the ``cdef()``. (The difference can still be seen if you take explicitly a pointer to this function with ``ffi.addressof()``, -or if the function is ``CFFI_CALL_PYTHON``.) +or if the function is ``extern "Python"``.) These calling convention specifiers are accepted but ignored on any platform other than 32-bit Windows. 
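The documentation tweak just above (changeset 8f48bf33a51e) reads more concretely with a sample declaration. The sketch below is an editorial illustration, not part of any changeset in this digest: the names GetValue and my_callback are invented, and it only shows what ``cdef()`` is expected to accept. Writing ``__stdcall`` on a plain function is harmless; on an ``extern "Python"`` function the calling convention is actually meaningful, but only on 32-bit Windows, and everywhere else both specifiers are parsed and ignored, which is exactly the point of the doc fix.

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("""
        /* plain function: adding __stdcall here does not hurt */
        int __stdcall GetValue(int flag);

        /* callback implemented in Python (out-of-line API mode plus
           @ffi.def_extern() is needed to actually define it); here the
           stdcall convention matters, but only on 32-bit Windows */
        extern "Python" int __stdcall my_callback(int arg);
    """)

The same declarations work unchanged on non-Windows platforms, where the specifiers are simply accepted and ignored.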
From pypy.commits at gmail.com Mon Dec 21 16:20:30 2015 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 21 Dec 2015 13:20:30 -0800 (PST) Subject: [pypy-commit] pypy py3.3: fix translation on OSX Message-ID: <56786d1e.2457c20a.d9372.ffffa7b3@mx.google.com> Author: Philip Jenvey Branch: py3.3 Changeset: r81415:223e23e2bfd3 Date: 2015-12-21 13:16 -0800 http://bitbucket.org/pypy/pypy/changeset/223e23e2bfd3/ Log: fix translation on OSX diff --git a/pypy/module/time/__init__.py b/pypy/module/time/__init__.py --- a/pypy/module/time/__init__.py +++ b/pypy/module/time/__init__.py @@ -1,6 +1,6 @@ from pypy.interpreter.mixedmodule import MixedModule -from .interp_time import CLOCK_CONSTANTS, cConfig +from .interp_time import CLOCK_CONSTANTS, HAS_CLOCK_GETTIME, cConfig import os _WIN = os.name == "nt" @@ -24,10 +24,11 @@ 'process_time': 'interp_time.process_time', } - if os.name == "posix": + if HAS_CLOCK_GETTIME: interpleveldefs['clock_gettime'] = 'interp_time.clock_gettime' interpleveldefs['clock_settime'] = 'interp_time.clock_settime' interpleveldefs['clock_getres'] = 'interp_time.clock_getres' + if os.name == "posix": interpleveldefs['tzset'] = 'interp_time.tzset' for constant in CLOCK_CONSTANTS: diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -14,6 +14,7 @@ _POSIX = os.name == "posix" _WIN = os.name == "nt" +_MACOSX = sys.platform == "darwin" _CYGWIN = sys.platform == "cygwin" _time_zones = [] @@ -106,6 +107,8 @@ _includes = ["time.h"] if _POSIX: _includes.append('sys/time.h') +if _MACOSX: + _includes.append('mach/mach_time.h') class CConfig: _compilation_info_ = ExternalCompilationInfo( @@ -147,7 +150,7 @@ ("tm_mon", rffi.INT), ("tm_year", rffi.INT), ("tm_wday", rffi.INT), ("tm_yday", rffi.INT), ("tm_isdst", rffi.INT)]) -if sys.platform == 'darwin': +if _MACOSX: CConfig.TIMEBASE_INFO = platform.Struct("struct mach_timebase_info", [ ("numer", rffi.UINT), ("denom", rffi.UINT), @@ -181,6 +184,7 @@ timeval = cConfig.timeval CLOCKS_PER_SEC = cConfig.CLOCKS_PER_SEC +HAS_CLOCK_GETTIME = cConfig.has_clock_gettime clock_t = cConfig.clock_t tm = cConfig.tm glob_buf = lltype.malloc(tm, flavor='raw', zero=True, immortal=True) @@ -194,7 +198,7 @@ c_mktime = external('mktime', [TM_P], rffi.TIME_T) c_localtime = external('localtime', [rffi.TIME_TP], TM_P, save_err=rffi.RFFI_SAVE_ERRNO) -if cConfig.has_clock_gettime: +if HAS_CLOCK_GETTIME: from rpython.rlib.rtime import TIMESPEC, c_clock_gettime c_clock_settime = external('clock_settime', [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, @@ -619,7 +623,7 @@ return space.wrap(float(tt)) -if cConfig.has_clock_gettime: +if HAS_CLOCK_GETTIME: def _timespec_to_seconds(timespec): return int(timespec.c_tv_sec) + int(timespec.c_tv_nsec) * 1e-9 @@ -728,20 +732,22 @@ def monotonic(space): return space.wrap(_GetTickCount64() * 1e-3) -elif sys.platform == 'darwin': - # untested so far +elif _MACOSX: c_mach_timebase_info = external('mach_timebase_info', - [lltype.Ptr(TIMEBASE_INFO)], lltype.Void) - c_mach_absolute_time = external('mach_absolute_time', [], lltype.ULONGLONG) + [lltype.Ptr(cConfig.TIMEBASE_INFO)], + lltype.Void) + c_mach_absolute_time = external('mach_absolute_time', [], rffi.ULONGLONG) - timebase_info = lltype.malloc(TIMEBASE_INFO, flavor='raw', zero=True, - immortal=True) + timebase_info = lltype.malloc(cConfig.TIMEBASE_INFO, flavor='raw', + zero=True, immortal=True) - def monotonic(): - if timebase_info.denom == 0: - 
mach_timebase_info(timebase_info) - time = mach_absolute_time() - nanosecs = time * timebase_info.numer / timebase_info.denom + def monotonic(space): + if rffi.getintfield(timebase_info, 'c_denom') == 0: + c_mach_timebase_info(timebase_info) + time = rffi.cast(lltype.Signed, c_mach_absolute_time()) + numer = rffi.getintfield(timebase_info, 'c_numer') + denom = rffi.getintfield(timebase_info, 'c_denom') + nanosecs = time * numer / denom secs = nanosecs / 10**9 rest = nanosecs % 10**9 return space.wrap(float(secs) + float(rest) * 1e-9) @@ -789,7 +795,7 @@ have_times = hasattr(rposix, 'c_times') def process_time(space): - if cConfig.has_clock_gettime and ( + if HAS_CLOCK_GETTIME and ( cConfig.CLOCK_PROF is not None or cConfig.CLOCK_PROCESS_CPUTIME_ID is not None): if cConfig.CLOCK_PROF is not None: @@ -811,7 +817,7 @@ if have_times: with lltype.scoped_alloc(rposix.TMS) as tms: ret = rposix.c_times(tms) - if ret != -1: - cpu_time = tms.c_tms_utime + tms.c_tms_stime + if rffi.cast(lltype.Signed, ret) != -1: + cpu_time = float(tms.c_tms_utime + tms.c_tms_stime) return space.wrap(cpu_time / rposix.CLOCK_TICKS_PER_SECOND) return clock(space) From pypy.commits at gmail.com Mon Dec 21 16:20:28 2015 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 21 Dec 2015 13:20:28 -0800 (PST) Subject: [pypy-commit] pypy py3.3: fix for BSDs lacking pty.h Message-ID: <56786d1c.0b831c0a.231fb.ffffaafb@mx.google.com> Author: Philip Jenvey Branch: py3.3 Changeset: r81414:1a3db47423b9 Date: 2015-12-16 16:27 -0800 http://bitbucket.org/pypy/pypy/changeset/1a3db47423b9/ Log: fix for BSDs lacking pty.h (grafted from b3e7453fb7e34810c4a1dc9bff33fdf213021615) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -247,9 +247,15 @@ includes = ['io.h', 'sys/utime.h', 'sys/types.h'] libraries = [] else: + if sys.platform.startswith(('darwin', 'netbsd', 'openbsd')): + _ptyh = 'util.h' + elif sys.platform.startswith('freebsd'): + _ptyh = 'libutil.h' + else: + _ptyh = 'pty.h' includes = ['unistd.h', 'sys/types.h', 'sys/wait.h', 'utime.h', 'sys/time.h', 'sys/times.h', - 'grp.h', 'dirent.h', 'pty.h'] + 'grp.h', 'dirent.h', _ptyh] libraries = ['util'] eci = ExternalCompilationInfo( includes=includes, From pypy.commits at gmail.com Mon Dec 21 16:31:19 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 21 Dec 2015 13:31:19 -0800 (PST) Subject: [pypy-commit] cffi default: Fix the reason Message-ID: <56786fa7.ea9cc20a.f4bb2.ffffa622@mx.google.com> Author: Armin Rigo Branch: Changeset: r2496:123d9f27f75c Date: 2015-12-21 22:31 +0100 http://bitbucket.org/cffi/cffi/changeset/123d9f27f75c/ Log: Fix the reason diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -1311,8 +1311,8 @@ function with a ``char *`` argument to which you pass a Python string will not actually modify the array of characters passed in, and so passes directly a pointer inside the Python string object. - (PyPy might in the future do the same, but it is harder because a - string object can move in memory when the GC runs.) + (PyPy might in the future do the same, but it is harder because + strings are not naturally zero-terminated in PyPy.) `(**)` C function calls are done with the GIL released. 
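The paragraph just above (changeset 123d9f27f75c) is about how a Python string is passed for a ``char *`` argument. A minimal, hedged sketch of what that looks like from user code follows; it is an editorial example, not taken from the repository, and it assumes a POSIX system where ``ffi.dlopen(None)`` gives access to the standard C library (on Windows you would name a specific DLL instead).

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("size_t strlen(const char *s);")
    libc = ffi.dlopen(None)     # standard C library on most POSIX systems

    data = b"hello"
    # A byte string can be passed directly where 'const char *' is expected.
    # On CPython the pointer may alias the string's internal buffer, since
    # the call is not allowed to modify it; PyPy has to pass a copy, because
    # its strings are not naturally zero-terminated.
    print(libc.strlen(data))    # prints 5

In both cases, as footnote (**) in the same doc section says, the actual C call runs with the GIL released.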
From pypy.commits at gmail.com Tue Dec 22 03:01:18 2015 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 22 Dec 2015 00:01:18 -0800 (PST) Subject: [pypy-commit] pypy vendor/stdlib: update the 2.7 stdlib to 2.7.11 Message-ID: <5679034e.84ab1c0a.1976a.316f@mx.google.com> Author: Philip Jenvey Branch: vendor/stdlib Changeset: r81416:27d2dac603eb Date: 2015-12-21 23:16 -0800 http://bitbucket.org/pypy/pypy/changeset/27d2dac603eb/ Log: update the 2.7 stdlib to 2.7.11 diff too long, truncating to 2000 out of 24636 lines diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -84,7 +84,7 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ - collapsed_path = _url_collapse_path(urllib.unquote(self.path)) + collapsed_path = _url_collapse_path(self.path) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: @@ -120,11 +120,7 @@ break # find an explicit query string, if present. - i = rest.rfind('?') - if i >= 0: - rest, query = rest[:i], rest[i+1:] - else: - query = '' + rest, _, query = rest.partition('?') # dissect the part after the directory name into a script name & # a possible additional path, to be stored in PATH_INFO. @@ -308,13 +304,15 @@ The utility of this function is limited to is_cgi method and helps preventing some security attacks. - Returns: A tuple of (head, tail) where tail is everything after the final / - and head is everything before it. Head will always start with a '/' and, - if it contains anything else, never have a trailing '/'. + Returns: The reconstituted URL, which will always start with a '/'. Raises: IndexError if too many '..' occur within the path. """ + # Query component should not be involved. + path, _, query = path.partition('?') + path = urllib.unquote(path) + # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. 
path_parts = path.split('/') @@ -335,6 +333,9 @@ else: tail_part = '' + if query: + tail_part = '?'.join((tail_part, query)) + splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = "/".join(splitpath) diff --git a/lib-python/2.7/UserDict.py b/lib-python/2.7/UserDict.py --- a/lib-python/2.7/UserDict.py +++ b/lib-python/2.7/UserDict.py @@ -1,7 +1,24 @@ """A more or less complete user-defined wrapper around dictionary objects.""" class UserDict: - def __init__(self, dict=None, **kwargs): + def __init__(*args, **kwargs): + if not args: + raise TypeError("descriptor '__init__' of 'UserDict' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + if args: + dict = args[0] + elif 'dict' in kwargs: + dict = kwargs.pop('dict') + import warnings + warnings.warn("Passing 'dict' as keyword argument is " + "deprecated", PendingDeprecationWarning, + stacklevel=2) + else: + dict = None self.data = {} if dict is not None: self.update(dict) @@ -43,7 +60,23 @@ def itervalues(self): return self.data.itervalues() def values(self): return self.data.values() def has_key(self, key): return key in self.data - def update(self, dict=None, **kwargs): + def update(*args, **kwargs): + if not args: + raise TypeError("descriptor 'update' of 'UserDict' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + if args: + dict = args[0] + elif 'dict' in kwargs: + dict = kwargs.pop('dict') + import warnings + warnings.warn("Passing 'dict' as keyword argument is deprecated", + PendingDeprecationWarning, stacklevel=2) + else: + dict = None if dict is None: pass elif isinstance(dict, UserDict): diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -453,6 +453,7 @@ for key in self._mapping: yield key +KeysView.register(type({}.viewkeys())) class ItemsView(MappingView, Set): @@ -473,6 +474,7 @@ for key in self._mapping: yield (key, self._mapping[key]) +ItemsView.register(type({}.viewitems())) class ValuesView(MappingView): @@ -486,6 +488,7 @@ for key in self._mapping: yield self._mapping[key] +ValuesView.register(type({}.viewvalues())) class MutableMapping(Mapping): diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -7,6 +7,7 @@ import os import abc import codecs +import sys import warnings import errno # Import thread instead of threading to reduce startup cost @@ -1497,6 +1498,11 @@ if not isinstance(encoding, basestring): raise ValueError("invalid encoding: %r" % encoding) + if sys.py3kwarning and not codecs.lookup(encoding)._is_text_encoding: + msg = ("%r is not a text encoding; " + "use codecs.open() to handle arbitrary codecs") + warnings.warnpy3k(msg % encoding, stacklevel=2) + if errors is None: errors = "strict" else: diff --git a/lib-python/2.7/base64.py b/lib-python/2.7/base64.py --- a/lib-python/2.7/base64.py +++ b/lib-python/2.7/base64.py @@ -7,6 +7,7 @@ import re import struct +import string import binascii @@ -52,7 +53,7 @@ # Strip off the trailing newline encoded = binascii.b2a_base64(s)[:-1] if altchars is not None: - return _translate(encoded, {'+': altchars[0], '/': altchars[1]}) + return encoded.translate(string.maketrans(b'+/', altchars[:2])) return encoded @@ -68,7 +69,7 @@ string. 
""" if altchars is not None: - s = _translate(s, {altchars[0]: '+', altchars[1]: '/'}) + s = s.translate(string.maketrans(altchars[:2], '+/')) try: return binascii.a2b_base64(s) except binascii.Error, msg: @@ -92,13 +93,16 @@ """ return b64decode(s) +_urlsafe_encode_translation = string.maketrans(b'+/', b'-_') +_urlsafe_decode_translation = string.maketrans(b'-_', b'+/') + def urlsafe_b64encode(s): """Encode a string using a url-safe Base64 alphabet. s is the string to encode. The encoded string is returned. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ - return b64encode(s, '-_') + return b64encode(s).translate(_urlsafe_encode_translation) def urlsafe_b64decode(s): """Decode a string encoded with the standard Base64 alphabet. @@ -109,7 +113,7 @@ The alphabet uses '-' instead of '+' and '_' instead of '/'. """ - return b64decode(s, '-_') + return b64decode(s.translate(_urlsafe_decode_translation)) @@ -200,7 +204,7 @@ # False, or the character to map the digit 1 (one) to. It should be # either L (el) or I (eye). if map01: - s = _translate(s, {'0': 'O', '1': map01}) + s = s.translate(string.maketrans(b'01', b'O' + map01)) if casefold: s = s.upper() # Strip off pad characters from the right. We need to count the pad diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -79,9 +79,19 @@ ### Codec base classes (defining the API) class CodecInfo(tuple): + """Codec details when looking up the codec registry""" + + # Private API to allow Python to blacklist the known non-Unicode + # codecs in the standard library. A more general mechanism to + # reliably distinguish test encodings from other codecs will hopefully + # be defined for Python 3.5 + # + # See http://bugs.python.org/issue19619 + _is_text_encoding = True # Assume codecs are text encodings by default def __new__(cls, encode, decode, streamreader=None, streamwriter=None, - incrementalencoder=None, incrementaldecoder=None, name=None): + incrementalencoder=None, incrementaldecoder=None, name=None, + _is_text_encoding=None): self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) self.name = name self.encode = encode @@ -90,6 +100,8 @@ self.incrementaldecoder = incrementaldecoder self.streamwriter = streamwriter self.streamreader = streamreader + if _is_text_encoding is not None: + self._is_text_encoding = _is_text_encoding return self def __repr__(self): @@ -126,8 +138,8 @@ 'strict' handling. The method may not store state in the Codec instance. Use - StreamCodec for codecs which have to keep state in order to - make encoding/decoding efficient. + StreamWriter for codecs which have to keep state in order to + make encoding efficient. The encoder must be able to handle zero length input and return an empty object of the output object type in this @@ -149,8 +161,8 @@ 'strict' handling. The method may not store state in the Codec instance. Use - StreamCodec for codecs which have to keep state in order to - make encoding/decoding efficient. + StreamReader for codecs which have to keep state in order to + make decoding efficient. The decoder must be able to handle zero length input and return an empty object of the output object type in this diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py --- a/lib-python/2.7/cookielib.py +++ b/lib-python/2.7/cookielib.py @@ -1434,7 +1434,7 @@ break # convert RFC 2965 Max-Age to seconds since epoch # XXX Strictly you're supposed to follow RFC 2616 - # age-calculation rules. 
Remember that zero Max-Age is a + # age-calculation rules. Remember that zero Max-Age # is a request to discard (old and new) cookie, though. k = "expires" v = self._now + v diff --git a/lib-python/2.7/ctypes/test/test_bitfields.py b/lib-python/2.7/ctypes/test/test_bitfields.py --- a/lib-python/2.7/ctypes/test/test_bitfields.py +++ b/lib-python/2.7/ctypes/test/test_bitfields.py @@ -259,5 +259,33 @@ x.a = 0xFEDCBA9876543211 self.assertEqual(x.a, 0xFEDCBA9876543211) + @need_symbol('c_uint32') + def test_uint32_swap_little_endian(self): + # Issue #23319 + class Little(LittleEndianStructure): + _fields_ = [("a", c_uint32, 24), + ("b", c_uint32, 4), + ("c", c_uint32, 4)] + b = bytearray(4) + x = Little.from_buffer(b) + x.a = 0xabcdef + x.b = 1 + x.c = 2 + self.assertEqual(b, b'\xef\xcd\xab\x21') + + @need_symbol('c_uint32') + def test_uint32_swap_big_endian(self): + # Issue #23319 + class Big(BigEndianStructure): + _fields_ = [("a", c_uint32, 24), + ("b", c_uint32, 4), + ("c", c_uint32, 4)] + b = bytearray(4) + x = Big.from_buffer(b) + x.a = 0xabcdef + x.b = 1 + x.c = 2 + self.assertEqual(b, b'\xab\xcd\xef\x12') + if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_pointers.py b/lib-python/2.7/ctypes/test/test_pointers.py --- a/lib-python/2.7/ctypes/test/test_pointers.py +++ b/lib-python/2.7/ctypes/test/test_pointers.py @@ -192,9 +192,19 @@ LargeNamedType = type('T' * 2 ** 25, (Structure,), {}) self.assertTrue(POINTER(LargeNamedType)) + # to not leak references, we must clean _pointer_type_cache + from ctypes import _pointer_type_cache + del _pointer_type_cache[LargeNamedType] + def test_pointer_type_str_name(self): large_string = 'T' * 2 ** 25 - self.assertTrue(POINTER(large_string)) + P = POINTER(large_string) + self.assertTrue(P) + + # to not leak references, we must clean _pointer_type_cache + from ctypes import _pointer_type_cache + del _pointer_type_cache[id(P)] + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_random_things.py b/lib-python/2.7/ctypes/test/test_random_things.py --- a/lib-python/2.7/ctypes/test/test_random_things.py +++ b/lib-python/2.7/ctypes/test/test_random_things.py @@ -30,7 +30,7 @@ # value is printed correctly. # # Changed in 0.9.3: No longer is '(in callback)' prepended to the - # error message - instead a additional frame for the C code is + # error message - instead an additional frame for the C code is # created, then a full traceback printed. When SystemExit is # raised in a callback function, the interpreter exits. diff --git a/lib-python/2.7/ctypes/test/test_win32.py b/lib-python/2.7/ctypes/test/test_win32.py --- a/lib-python/2.7/ctypes/test/test_win32.py +++ b/lib-python/2.7/ctypes/test/test_win32.py @@ -114,5 +114,9 @@ self.assertEqual(ret.top, top.value) self.assertEqual(ret.bottom, bottom.value) + # to not leak references, we must clean _pointer_type_cache + from ctypes import _pointer_type_cache + del _pointer_type_cache[RECT] + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/distutils/__init__.py b/lib-python/2.7/distutils/__init__.py --- a/lib-python/2.7/distutils/__init__.py +++ b/lib-python/2.7/distutils/__init__.py @@ -8,12 +8,6 @@ setup (...) """ -__revision__ = "$Id$" +import sys -# Distutils version -# -# Updated automatically by the Python release process. 
-# -#--start constants-- -__version__ = "2.7.10" -#--end constants-- +__version__ = sys.version[:sys.version.index(' ')] diff --git a/lib-python/2.7/distutils/ccompiler.py b/lib-python/2.7/distutils/ccompiler.py --- a/lib-python/2.7/distutils/ccompiler.py +++ b/lib-python/2.7/distutils/ccompiler.py @@ -718,7 +718,7 @@ raise NotImplementedError def library_option(self, lib): - """Return the compiler option to add 'dir' to the list of libraries + """Return the compiler option to add 'lib' to the list of libraries linked into the shared library or executable. """ raise NotImplementedError diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -199,10 +199,12 @@ else: # win-amd64 or win-ia64 suffix = self.plat_name[4:] - new_lib = os.path.join(sys.exec_prefix, 'PCbuild') - if suffix: - new_lib = os.path.join(new_lib, suffix) - self.library_dirs.append(new_lib) + # We could have been built in one of two places; add both + for d in ('PCbuild',), ('PC', 'VS9.0'): + new_lib = os.path.join(sys.exec_prefix, *d) + if suffix: + new_lib = os.path.join(new_lib, suffix) + self.library_dirs.append(new_lib) elif MSVC_VERSION == 8: self.library_dirs.append(os.path.join(sys.exec_prefix, diff --git a/lib-python/2.7/distutils/msvc9compiler.py b/lib-python/2.7/distutils/msvc9compiler.py --- a/lib-python/2.7/distutils/msvc9compiler.py +++ b/lib-python/2.7/distutils/msvc9compiler.py @@ -426,7 +426,7 @@ self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] if self.__version >= 7: self.ldflags_shared_debug = [ - '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None' + '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' ] self.ldflags_static = [ '/nologo'] diff --git a/lib-python/2.7/distutils/tests/test_core.py b/lib-python/2.7/distutils/tests/test_core.py --- a/lib-python/2.7/distutils/tests/test_core.py +++ b/lib-python/2.7/distutils/tests/test_core.py @@ -9,6 +9,7 @@ from test.test_support import captured_stdout, run_unittest import unittest from distutils.tests import support +from distutils import log # setup script that uses __file__ setup_using___file__ = """\ @@ -36,6 +37,7 @@ self.old_stdout = sys.stdout self.cleanup_testfn() self.old_argv = sys.argv, sys.argv[:] + self.addCleanup(log.set_threshold, log._global_log.threshold) def tearDown(self): sys.stdout = self.old_stdout diff --git a/lib-python/2.7/distutils/tests/test_dist.py b/lib-python/2.7/distutils/tests/test_dist.py --- a/lib-python/2.7/distutils/tests/test_dist.py +++ b/lib-python/2.7/distutils/tests/test_dist.py @@ -13,6 +13,7 @@ import distutils.dist from test.test_support import TESTFN, captured_stdout, run_unittest, unlink from distutils.tests import support +from distutils import log class test_dist(Command): @@ -397,6 +398,7 @@ def test_show_help(self): # smoke test, just makes sure some help is displayed + self.addCleanup(log.set_threshold, log._global_log.threshold) dist = Distribution() sys.argv = [] dist.help = 1 diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py --- a/lib-python/2.7/email/test/test_email.py +++ b/lib-python/2.7/email/test/test_email.py @@ -12,6 +12,10 @@ import textwrap from cStringIO import StringIO from random import choice +try: + from threading import Thread +except ImportError: + from dummy_threading import Thread import email @@ -33,7 +37,7 @@ from email import base64MIME from email import quopriMIME -from 
test.test_support import findfile, run_unittest +from test.test_support import findfile, run_unittest, start_threads from email.test import __file__ as landmark @@ -2412,6 +2416,25 @@ addrs = Utils.getaddresses(['User ((nested comment)) ']) eq(addrs[0][1], 'foo at bar.com') + def test_make_msgid_collisions(self): + # Test make_msgid uniqueness, even with multiple threads + class MsgidsThread(Thread): + def run(self): + # generate msgids for 3 seconds + self.msgids = [] + append = self.msgids.append + make_msgid = Utils.make_msgid + clock = time.time + tfin = clock() + 3.0 + while clock() < tfin: + append(make_msgid()) + + threads = [MsgidsThread() for i in range(5)] + with start_threads(threads): + pass + all_ids = sum([t.msgids for t in threads], []) + self.assertEqual(len(set(all_ids)), len(all_ids)) + def test_utils_quote_unquote(self): eq = self.assertEqual msg = Message() diff --git a/lib-python/2.7/email/utils.py b/lib-python/2.7/email/utils.py --- a/lib-python/2.7/email/utils.py +++ b/lib-python/2.7/email/utils.py @@ -177,21 +177,20 @@ def make_msgid(idstring=None): """Returns a string suitable for RFC 2822 compliant Message-ID, e.g: - <20020201195627.33539.96671 at nightshade.la.mastaler.com> + <142480216486.20800.16526388040877946887 at nightshade.la.mastaler.com> Optional idstring if given is a string used to strengthen the uniqueness of the message id. """ - timeval = time.time() - utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval)) + timeval = int(time.time()*100) pid = os.getpid() - randint = random.randrange(100000) + randint = random.getrandbits(64) if idstring is None: idstring = '' else: idstring = '.' + idstring idhost = socket.getfqdn() - msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost) + msgid = '<%d.%d.%d%s@%s>' % (timeval, pid, randint, idstring, idhost) return msgid diff --git a/lib-python/2.7/encodings/base64_codec.py b/lib-python/2.7/encodings/base64_codec.py --- a/lib-python/2.7/encodings/base64_codec.py +++ b/lib-python/2.7/encodings/base64_codec.py @@ -76,4 +76,5 @@ incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/encodings/bz2_codec.py b/lib-python/2.7/encodings/bz2_codec.py --- a/lib-python/2.7/encodings/bz2_codec.py +++ b/lib-python/2.7/encodings/bz2_codec.py @@ -99,4 +99,5 @@ incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/encodings/hex_codec.py b/lib-python/2.7/encodings/hex_codec.py --- a/lib-python/2.7/encodings/hex_codec.py +++ b/lib-python/2.7/encodings/hex_codec.py @@ -76,4 +76,5 @@ incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/encodings/quopri_codec.py b/lib-python/2.7/encodings/quopri_codec.py --- a/lib-python/2.7/encodings/quopri_codec.py +++ b/lib-python/2.7/encodings/quopri_codec.py @@ -21,7 +21,7 @@ # using str() because of cStringIO's Unicode undesired Unicode behavior. 
f = StringIO(str(input)) g = StringIO() - quopri.encode(f, g, 1) + quopri.encode(f, g, quotetabs=True) output = g.getvalue() return (output, len(input)) @@ -72,4 +72,5 @@ incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/encodings/rot_13.py b/lib-python/2.7/encodings/rot_13.py --- a/lib-python/2.7/encodings/rot_13.py +++ b/lib-python/2.7/encodings/rot_13.py @@ -44,6 +44,7 @@ incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, + _is_text_encoding=False, ) ### Decoding Map diff --git a/lib-python/2.7/encodings/uu_codec.py b/lib-python/2.7/encodings/uu_codec.py --- a/lib-python/2.7/encodings/uu_codec.py +++ b/lib-python/2.7/encodings/uu_codec.py @@ -126,4 +126,5 @@ incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/encodings/zlib_codec.py b/lib-python/2.7/encodings/zlib_codec.py --- a/lib-python/2.7/encodings/zlib_codec.py +++ b/lib-python/2.7/encodings/zlib_codec.py @@ -99,4 +99,5 @@ incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/ensurepip/__init__.py b/lib-python/2.7/ensurepip/__init__.py --- a/lib-python/2.7/ensurepip/__init__.py +++ b/lib-python/2.7/ensurepip/__init__.py @@ -12,9 +12,9 @@ __all__ = ["version", "bootstrap"] -_SETUPTOOLS_VERSION = "15.2" +_SETUPTOOLS_VERSION = "18.2" -_PIP_VERSION = "6.1.1" +_PIP_VERSION = "7.1.2" # pip currently requires ssl support, so we try to provide a nicer # error message when that is missing (http://bugs.python.org/issue19744) @@ -147,7 +147,7 @@ _disable_pip_configuration_settings() # Construct the arguments to be passed to the pip command - args = ["uninstall", "-y"] + args = ["uninstall", "-y", "--disable-pip-version-check"] if verbosity: args += ["-" + "v" * verbosity] diff --git a/lib-python/2.7/ensurepip/_bundled/pip-6.1.1-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-6.1.1-py2.py3-none-any.whl deleted file mode 100644 index e59694a019051d58b9a378a1adfc9461b8cec9c3..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/pip-7.1.2-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-7.1.2-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..5e490155f0ca7f4ddb64c93c39fb2efb8795cd08 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-15.2-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-15.2-py2.py3-none-any.whl deleted file mode 100644 index f153ed376684275e08fcfebdb2de8352fb074171..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-18.2-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-18.2-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f4288d68e074466894d8a2342e113737df7b7649 GIT binary patch [cut] diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py --- a/lib-python/2.7/httplib.py +++ b/lib-python/2.7/httplib.py @@ -772,8 +772,7 @@ if self.sock: raise RuntimeError("Can't setup tunnel for established connection.") - self._tunnel_host = host - self._tunnel_port = port + self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: @@ -802,8 +801,8 
@@ self.debuglevel = level def _tunnel(self): - (host, port) = self._get_hostport(self._tunnel_host, self._tunnel_port) - self.send("CONNECT %s:%d HTTP/1.0\r\n" % (host, port)) + self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host, + self._tunnel_port)) for header, value in self._tunnel_headers.iteritems(): self.send("%s: %s\r\n" % (header, value)) self.send("\r\n") @@ -811,6 +810,11 @@ method = self._method) (version, code, message) = response._read_status() + if version == "HTTP/0.9": + # HTTP/0.9 doesn't support the CONNECT verb, so if httplib has + # concluded HTTP/0.9 is being used something has gone wrong. + self.close() + raise socket.error("Invalid response from tunnel request") if code != 200: self.close() raise socket.error("Tunnel connection failed: %d %s" % (code, @@ -1063,7 +1067,7 @@ elif body is not None: try: thelen = str(len(body)) - except TypeError: + except (TypeError, AttributeError): # If this is a file-like object, try to # fstat its file descriptor try: diff --git a/lib-python/2.7/idlelib/AutoCompleteWindow.py b/lib-python/2.7/idlelib/AutoCompleteWindow.py --- a/lib-python/2.7/idlelib/AutoCompleteWindow.py +++ b/lib-python/2.7/idlelib/AutoCompleteWindow.py @@ -192,6 +192,7 @@ scrollbar.config(command=listbox.yview) scrollbar.pack(side=RIGHT, fill=Y) listbox.pack(side=LEFT, fill=BOTH, expand=True) + acw.lift() # work around bug in Tk 8.5.18+ (issue #24570) # Initialize the listbox selection self.listbox.select_set(self._binary_search(self.start)) diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -76,7 +76,6 @@ ]), ('options', [ ('Configure _IDLE', '<>'), - ('Configure _Extensions', '<>'), None, ]), ('help', [ diff --git a/lib-python/2.7/idlelib/CallTipWindow.py b/lib-python/2.7/idlelib/CallTipWindow.py --- a/lib-python/2.7/idlelib/CallTipWindow.py +++ b/lib-python/2.7/idlelib/CallTipWindow.py @@ -72,6 +72,7 @@ background="#ffffe0", relief=SOLID, borderwidth=1, font = self.widget['font']) self.label.pack() + tw.lift() # work around bug in Tk 8.5.18+ (issue #24570) self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhide_event) diff --git a/lib-python/2.7/idlelib/ClassBrowser.py b/lib-python/2.7/idlelib/ClassBrowser.py --- a/lib-python/2.7/idlelib/ClassBrowser.py +++ b/lib-python/2.7/idlelib/ClassBrowser.py @@ -56,7 +56,7 @@ self.settitle() top.focus_set() # create scrolled canvas - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() background = idleConf.GetHighlight(theme, 'normal')['background'] sc = ScrolledCanvas(top, bg=background, highlightthickness=0, takefocus=1) sc.frame.pack(expand=1, fill="both") diff --git a/lib-python/2.7/idlelib/ColorDelegator.py b/lib-python/2.7/idlelib/ColorDelegator.py --- a/lib-python/2.7/idlelib/ColorDelegator.py +++ b/lib-python/2.7/idlelib/ColorDelegator.py @@ -62,7 +62,7 @@ self.tag_raise('sel') def LoadTagDefs(self): - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() self.tagdefs = { "COMMENT": idleConf.GetHighlight(theme, "comment"), "KEYWORD": idleConf.GetHighlight(theme, "keyword"), diff --git a/lib-python/2.7/idlelib/Debugger.py b/lib-python/2.7/idlelib/Debugger.py --- a/lib-python/2.7/idlelib/Debugger.py +++ b/lib-python/2.7/idlelib/Debugger.py @@ -17,7 +17,10 @@ self.set_step() return message = self.__frame2message(frame) - self.gui.interaction(message, frame) + try: + self.gui.interaction(message, frame) + 
except TclError: # When closing debugger window with [x] in 3.x + pass def user_exception(self, frame, info): if self.in_rpc_code(frame): @@ -59,8 +62,42 @@ self.frame = None self.make_gui() self.interacting = 0 + self.nesting_level = 0 def run(self, *args): + # Deal with the scenario where we've already got a program running + # in the debugger and we want to start another. If that is the case, + # our second 'run' was invoked from an event dispatched not from + # the main event loop, but from the nested event loop in 'interaction' + # below. So our stack looks something like this: + # outer main event loop + # run() + # + # callback to debugger's interaction() + # nested event loop + # run() for second command + # + # This kind of nesting of event loops causes all kinds of problems + # (see e.g. issue #24455) especially when dealing with running as a + # subprocess, where there's all kinds of extra stuff happening in + # there - insert a traceback.print_stack() to check it out. + # + # By this point, we've already called restart_subprocess() in + # ScriptBinding. However, we also need to unwind the stack back to + # that outer event loop. To accomplish this, we: + # - return immediately from the nested run() + # - abort_loop ensures the nested event loop will terminate + # - the debugger's interaction routine completes normally + # - the restart_subprocess() will have taken care of stopping + # the running program, which will also let the outer run complete + # + # That leaves us back at the outer main event loop, at which point our + # after event can fire, and we'll come back to this routine with a + # clean stack. + if self.nesting_level > 0: + self.abort_loop() + self.root.after(100, lambda: self.run(*args)) + return try: self.interacting = 1 return self.idb.run(*args) @@ -68,6 +105,10 @@ self.interacting = 0 def close(self, event=None): + try: + self.quit() + except Exception: + pass if self.interacting: self.top.bell() return @@ -191,7 +232,12 @@ b.configure(state="normal") # self.top.wakeup() - self.root.mainloop() + # Nested main loop: Tkinter's main loop is not reentrant, so use + # Tcl's vwait facility, which reenters the event loop until an + # event handler sets the variable we're waiting on + self.nesting_level += 1 + self.root.tk.call('vwait', '::idledebugwait') + self.nesting_level -= 1 # for b in self.buttons: b.configure(state="disabled") @@ -215,23 +261,26 @@ def cont(self): self.idb.set_continue() - self.root.quit() + self.abort_loop() def step(self): self.idb.set_step() - self.root.quit() + self.abort_loop() def next(self): self.idb.set_next(self.frame) - self.root.quit() + self.abort_loop() def ret(self): self.idb.set_return(self.frame) - self.root.quit() + self.abort_loop() def quit(self): self.idb.set_quit() - self.root.quit() + self.abort_loop() + + def abort_loop(self): + self.root.tk.call('set', '::idledebugwait', '1') stackviewer = None diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -9,7 +9,6 @@ import webbrowser from idlelib.MultiCall import MultiCallCreator -from idlelib import idlever from idlelib import WindowList from idlelib import SearchDialog from idlelib import GrepDialog @@ -18,6 +17,7 @@ from idlelib.configHandler import idleConf from idlelib import aboutDialog, textView, configDialog from idlelib import macosxSupport +from idlelib import help # The default tab setting for a Text widget, in average-width 
characters. TK_TABWIDTH_DEFAULT = 8 @@ -83,6 +83,11 @@ near - a Toplevel widget (e.g. EditorWindow or PyShell) to use as a reference for placing the help window """ + import warnings as w + w.warn("EditorWindow.HelpDialog is no longer used by Idle.\n" + "It will be removed in 3.6 or later.\n" + "It has been replaced by private help.HelpWindow\n", + DeprecationWarning, stacklevel=2) if self.dlg is None: self.show_dialog(parent) if near: @@ -109,9 +114,7 @@ self.dlg = None self.parent = None -helpDialog = HelpDialog() # singleton instance -def _help_dialog(parent): # wrapper for htest - helpDialog.show_dialog(parent) +helpDialog = HelpDialog() # singleton instance, no longer used class EditorWindow(object): @@ -154,7 +157,6 @@ EditorWindow.help_url = 'file://' + EditorWindow.help_url else: EditorWindow.help_url = "https://docs.python.org/%d.%d/" % sys.version_info[:2] - currentTheme=idleConf.CurrentTheme() self.flist = flist root = root or flist.root self.root = root @@ -182,6 +184,7 @@ 'name': 'text', 'padx': 5, 'wrap': 'none', + 'highlightthickness': 0, 'width': self.width, 'height': idleConf.GetOption('main', 'EditorWindow', 'height', type='int')} if TkVersion >= 8.5: @@ -200,13 +203,13 @@ if macosxSupport.isAquaTk(): # Command-W on editorwindows doesn't work without this. text.bind('<>', self.close_event) - # Some OS X systems have only one mouse button, - # so use control-click for pulldown menus there. - # (Note, AquaTk defines <2> as the right button if - # present and the Tk Text widget already binds <2>.) + # Some OS X systems have only one mouse button, so use + # control-click for popup context menus there. For two + # buttons, AquaTk defines <2> as the right button, not <3>. text.bind("",self.right_menu_event) + text.bind("<2>", self.right_menu_event) else: - # Elsewhere, use right-click for pulldown menus. + # Elsewhere, use right-click for popup menus. text.bind("<3>",self.right_menu_event) text.bind("<>", self.cut) text.bind("<>", self.copy) @@ -216,8 +219,6 @@ text.bind("<>", self.python_docs) text.bind("<>", self.about_dialog) text.bind("<>", self.config_dialog) - text.bind("<>", - self.config_extensions_dialog) text.bind("<>", self.open_module) text.bind("<>", lambda event: "break") text.bind("<>", self.select_all) @@ -258,13 +259,7 @@ vbar['command'] = text.yview vbar.pack(side=RIGHT, fill=Y) text['yscrollcommand'] = vbar.set - fontWeight = 'normal' - if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'): - fontWeight='bold' - text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'), - idleConf.GetOption('main', 'EditorWindow', - 'font-size', type='int'), - fontWeight)) + text['font'] = idleConf.GetFont(self.root, 'main', 'EditorWindow') text_frame.pack(side=LEFT, fill=BOTH, expand=1) text.pack(side=TOP, fill=BOTH, expand=1) text.focus_set() @@ -318,7 +313,7 @@ io.set_filename_change_hook(self.filename_change_hook) # Create the recent files submenu - self.recent_files_menu = Menu(self.menubar) + self.recent_files_menu = Menu(self.menubar, tearoff=0) self.menudict['file'].insert_cascade(3, label='Recent Files', underline=0, menu=self.recent_files_menu) @@ -353,36 +348,6 @@ self.askinteger = tkSimpleDialog.askinteger self.showerror = tkMessageBox.showerror - self._highlight_workaround() # Fix selection tags on Windows - - def _highlight_workaround(self): - # On Windows, Tk removes painting of the selection - # tags which is different behavior than on Linux and Mac. - # See issue14146 for more information. 
- if not sys.platform.startswith('win'): - return - - text = self.text - text.event_add("<>", "") - text.event_add("<>", "") - def highlight_fix(focus): - sel_range = text.tag_ranges("sel") - if sel_range: - if focus == 'out': - HILITE_CONFIG = idleConf.GetHighlight( - idleConf.CurrentTheme(), 'hilite') - text.tag_config("sel_fix", HILITE_CONFIG) - text.tag_raise("sel_fix") - text.tag_add("sel_fix", *sel_range) - elif focus == 'in': - text.tag_remove("sel_fix", "1.0", "end") - - text.bind("<>", - lambda ev: highlight_fix("out")) - text.bind("<>", - lambda ev: highlight_fix("in")) - - def _filename_to_unicode(self, filename): """convert filename to unicode in order to display it in Tk""" if isinstance(filename, unicode) or not filename: @@ -446,6 +411,7 @@ def set_status_bar(self): self.status_bar = self.MultiStatusBar(self.top) + sep = Frame(self.top, height=1, borderwidth=1, background='grey75') if sys.platform == "darwin": # Insert some padding to avoid obscuring some of the statusbar # by the resize widget. @@ -453,6 +419,7 @@ self.status_bar.set_label('column', 'Col: ?', side=RIGHT) self.status_bar.set_label('line', 'Ln: ?', side=RIGHT) self.status_bar.pack(side=BOTTOM, fill=X) + sep.pack(side=BOTTOM, fill=X) self.text.bind("<>", self.set_line_and_column) self.text.event_add("<>", "", "") @@ -479,12 +446,13 @@ self.menudict = menudict = {} for name, label in self.menu_specs: underline, label = prepstr(label) - menudict[name] = menu = Menu(mbar, name=name) + menudict[name] = menu = Menu(mbar, name=name, tearoff=0) mbar.add_cascade(label=label, menu=menu, underline=underline) if macosxSupport.isCarbonTk(): # Insert the application menu - menudict['application'] = menu = Menu(mbar, name='apple') + menudict['application'] = menu = Menu(mbar, name='apple', + tearoff=0) mbar.add_cascade(label='IDLE', menu=menu) self.fill_menus() @@ -565,19 +533,23 @@ return 'normal' def about_dialog(self, event=None): + "Handle Help 'About IDLE' event." + # Synchronize with macosxSupport.overrideRootMenu.about_dialog. aboutDialog.AboutDialog(self.top,'About IDLE') def config_dialog(self, event=None): + "Handle Options 'Configure IDLE' event." + # Synchronize with macosxSupport.overrideRootMenu.config_dialog. configDialog.ConfigDialog(self.top,'Settings') - def config_extensions_dialog(self, event=None): - configDialog.ConfigExtensionsDialog(self.top) def help_dialog(self, event=None): + "Handle Help 'IDLE Help' event." + # Synchronize with macosxSupport.overrideRootMenu.help_dialog. 
if self.root: parent = self.root else: parent = self.top - helpDialog.display(parent, near=self.top) + help.show_idlehelp(parent) def python_docs(self, event=None): if sys.platform[:3] == 'win': @@ -785,7 +757,7 @@ # Called from self.filename_change_hook and from configDialog.py self._rmcolorizer() self._addcolorizer() - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() normal_colors = idleConf.GetHighlight(theme, 'normal') cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg') select_colors = idleConf.GetHighlight(theme, 'hilite') @@ -796,17 +768,15 @@ selectforeground=select_colors['foreground'], selectbackground=select_colors['background'], ) + if TkVersion >= 8.5: + self.text.config( + inactiveselectbackground=select_colors['background']) def ResetFont(self): "Update the text widgets' font if it is changed" # Called from configDialog.py - fontWeight='normal' - if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'): - fontWeight='bold' - self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'), - idleConf.GetOption('main','EditorWindow','font-size', - type='int'), - fontWeight)) + + self.text['font'] = idleConf.GetFont(self.root, 'main','EditorWindow') def RemoveKeybindings(self): "Remove the keybindings before they are changed." @@ -920,8 +890,10 @@ except IOError as err: if not getattr(self.root, "recentfilelist_error_displayed", False): self.root.recentfilelist_error_displayed = True - tkMessageBox.showerror(title='IDLE Error', - message='Unable to update Recent Files list:\n%s' + tkMessageBox.showwarning(title='IDLE Warning', + message="Cannot update File menu Recent Files list. " + "Your operating system says:\n%s\n" + "Select OK and IDLE will continue without updating." % str(err), parent=self.text) # for each edit window instance, construct the recent files menu @@ -1729,4 +1701,4 @@ if __name__ == '__main__': from idlelib.idle_test.htest import run - run(_help_dialog, _editor_window) + run(_editor_window) diff --git a/lib-python/2.7/idlelib/GrepDialog.py b/lib-python/2.7/idlelib/GrepDialog.py --- a/lib-python/2.7/idlelib/GrepDialog.py +++ b/lib-python/2.7/idlelib/GrepDialog.py @@ -1,3 +1,4 @@ +from __future__ import print_function import os import fnmatch import re # for htest @@ -5,7 +6,6 @@ from Tkinter import StringVar, BooleanVar, Checkbutton # for GrepDialog from Tkinter import Tk, Text, Button, SEL, END # for htest from idlelib import SearchEngine -import itertools from idlelib.SearchDialogBase import SearchDialogBase # Importing OutputWindow fails due to import loop # EditorWindow -> GrepDialop -> OutputWindow -> EditorWindow diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -5,22 +5,18 @@ # end-of-line conventions, instead of relying on the standard library, # which will only understand the local convention. 
+import codecs +from codecs import BOM_UTF8 import os -import types import pipes +import re import sys -import codecs import tempfile + import tkFileDialog import tkMessageBox -import re -from Tkinter import * from SimpleDialog import SimpleDialog -from idlelib.configHandler import idleConf - -from codecs import BOM_UTF8 - # Try setting the locale, so that we can find out # what encoding to use try: @@ -251,7 +247,7 @@ with open(filename, 'rb') as f: chars = f.read() except IOError as msg: - tkMessageBox.showerror("I/O Error", str(msg), master=self.text) + tkMessageBox.showerror("I/O Error", str(msg), parent=self.text) return False chars = self.decode(chars) @@ -298,7 +294,7 @@ title="Error loading the file", message="The encoding '%s' is not known to this Python "\ "installation. The file may not display correctly" % name, - master = self.text) + parent = self.text) enc = None if enc: try: @@ -328,7 +324,7 @@ title="Save On Close", message=message, default=tkMessageBox.YES, - master=self.text) + parent=self.text) if confirm: reply = "yes" self.save(None) @@ -387,11 +383,11 @@ return True except IOError as msg: tkMessageBox.showerror("I/O Error", str(msg), - master=self.text) + parent=self.text) return False def encode(self, chars): - if isinstance(chars, types.StringType): + if isinstance(chars, str): # This is either plain ASCII, or Tk was returning mixed-encoding # text to us. Don't try to guess further. return chars @@ -417,7 +413,7 @@ tkMessageBox.showerror( "I/O Error", "%s. Saving as UTF-8" % failed, - master = self.text) + parent = self.text) # If there was a UTF-8 signature, use that. This should not fail if self.fileencoding == BOM_UTF8 or failed: return BOM_UTF8 + chars.encode("utf-8") @@ -430,7 +426,7 @@ "I/O Error", "Cannot save this as '%s' anymore. Saving as UTF-8" \ % self.fileencoding, - master = self.text) + parent = self.text) return BOM_UTF8 + chars.encode("utf-8") # Nothing was declared, and we had not determined an encoding # on loading. Recommend an encoding line. 
@@ -474,7 +470,7 @@ title="Print", message="Print to Default Printer", default=tkMessageBox.OK, - master=self.text) + parent=self.text) if not confirm: self.text.focus_set() return "break" @@ -511,10 +507,10 @@ status + output if output: output = "Printing command: %s\n" % repr(command) + output - tkMessageBox.showerror("Print status", output, master=self.text) + tkMessageBox.showerror("Print status", output, parent=self.text) else: #no printing for this platform message = "Printing is not enabled for this platform: %s" % platform - tkMessageBox.showinfo("Print status", message, master=self.text) + tkMessageBox.showinfo("Print status", message, parent=self.text) if tempfilename: os.unlink(tempfilename) return "break" @@ -533,7 +529,7 @@ def askopenfile(self): dir, base = self.defaultfilename("open") if not self.opendialog: - self.opendialog = tkFileDialog.Open(master=self.text, + self.opendialog = tkFileDialog.Open(parent=self.text, filetypes=self.filetypes) filename = self.opendialog.show(initialdir=dir, initialfile=base) if isinstance(filename, unicode): @@ -556,7 +552,7 @@ dir, base = self.defaultfilename("save") if not self.savedialog: self.savedialog = tkFileDialog.SaveAs( - master=self.text, + parent=self.text, filetypes=self.filetypes, defaultextension=self.defaultextension) filename = self.savedialog.show(initialdir=dir, initialfile=base) @@ -568,8 +564,12 @@ "Update recent file list on all editor windows" self.editwin.update_recent_files_list(filename) -def _io_binding(parent): - root = Tk() + +def _io_binding(parent): # htest # + from Tkinter import Toplevel, Text + from idlelib.configHandler import idleConf + + root = Toplevel(parent) root.title("Test IOBinding") width, height, x, y = list(map(int, re.split('[x+]', parent.geometry()))) root.geometry("+%d+%d"%(x, y + 150)) @@ -586,12 +586,13 @@ self.text.event_generate("<>") def save(self, event): self.text.event_generate("<>") + def update_recent_files_list(s, f): pass text = Text(root) text.pack() text.focus_set() editwin = MyEditWin(text) - io = IOBinding(editwin) + IOBinding(editwin) if __name__ == "__main__": from idlelib.idle_test.htest import run diff --git a/lib-python/2.7/idlelib/MultiStatusBar.py b/lib-python/2.7/idlelib/MultiStatusBar.py --- a/lib-python/2.7/idlelib/MultiStatusBar.py +++ b/lib-python/2.7/idlelib/MultiStatusBar.py @@ -8,13 +8,15 @@ Frame.__init__(self, master, **kw) self.labels = {} - def set_label(self, name, text='', side=LEFT): + def set_label(self, name, text='', side=LEFT, width=0): if name not in self.labels: - label = Label(self, bd=1, relief=SUNKEN, anchor=W) - label.pack(side=side) + label = Label(self, borderwidth=0, anchor=W) + label.pack(side=side, pady=0, padx=4) self.labels[name] = label else: label = self.labels[name] + if width != 0: + label.config(width=width) label.config(text=text) def _multistatus_bar(parent): diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt --- a/lib-python/2.7/idlelib/NEWS.txt +++ b/lib-python/2.7/idlelib/NEWS.txt @@ -1,7 +1,139 @@ +What's New in IDLE 2.7.11? +========================== +*Release date: 2015-12-06* + +- Issue 15348: Stop the debugger engine (normally in a user process) + before closing the debugger window (running in the IDLE process). + This prevents the RuntimeErrors that were being caught and ignored. 
+ +- Issue #24455: Prevent IDLE from hanging when a) closing the shell while the + debugger is active (15347); b) closing the debugger with the [X] button + (15348); and c) activating the debugger when already active (24455). + The patch by Mark Roseman does this by making two changes. + 1. Suspend and resume the gui.interaction method with the tcl vwait + mechanism intended for this purpose (instead of root.mainloop & .quit). + 2. In gui.run, allow any existing interaction to terminate first. + +- Change 'The program' to 'Your program' in an IDLE 'kill program?' message + to make it clearer that the program referred to is the currently running + user program, not IDLE itself. + +- Issue #24750: Improve the appearance of the IDLE editor window status bar. + Patch by Mark Roseman. + +- Issue #25313: Change the handling of new built-in text color themes to better + address the compatibility problem introduced by the addition of IDLE Dark. + Consistently use the revised idleConf.CurrentTheme everywhere in idlelib. + +- Issue #24782: Extension configuration is now a tab in the IDLE Preferences + dialog rather than a separate dialog. The former tabs are now a sorted + list. Patch by Mark Roseman. + +- Issue #22726: Re-activate the config dialog help button with some content + about the other buttons and the new IDLE Dark theme. + +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + To use it with IDLEs released before November 2015, hit the + 'Save as New Custom Theme' button and enter a new name, + such as 'Custom Dark'. The custom theme will work with any IDLE + release, and can be modified. + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc chapter. + 'IDLE' now means 'Integrated Development and Learning Environment'. + +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + +- Issue #24972: Inactive selection background now matches active selection + background, as configured by users, on all systems. Found items are now + always highlighted on Windows. Initial patch by Mark Roseman. + +- Issue #24570: Idle: make calltip and completion boxes appear on Macs + affected by a tk regression. Initial patch by Mark Roseman. + +- Issue #24988: Idle ScrolledList context menus (used in debugger) + now work on Mac Aqua. Patch by Mark Roseman. + +- Issue #24801: Make right-click for context menu work on Mac Aqua. + Patch by Mark Roseman. + +- Issue #25173: Associate tkinter messageboxes with a specific widget. + For Mac OSX, make them a 'sheet'. Patch by Mark Roseman. + +- Issue #25198: Enhance the initial html viewer now used for Idle Help. + * Properly indent fixed-pitch text (patch by Mark Roseman). + * Give code snippet a very Sphinx-like light blueish-gray background. + * Re-use initial width and height set by users for shell and editor. + * When the Table of Contents (TOC) menu is used, put the section header + at the top of the screen. + +- Issue #25225: Condense and rewrite Idle doc section on text colors. + +- Issue #21995: Explain some differences between IDLE and console Python. + +- Issue #22820: Explain need for *print* when running file from Idle editor. + +- Issue #25224: Doc: augment Idle feature list and no-subprocess section. 
+ +- Issue #25219: Update doc for Idle command line options. + Some were missing and notes were not correct. + +- Issue #24861: Most of idlelib is private and subject to change. + Use idleib.idle.* to start Idle. See idlelib.__init__.__doc__. + +- Issue #25199: Idle: add synchronization comments for future maintainers. + +- Issue #16893: Replace help.txt with help.html for Idle doc display. + The new idlelib/help.html is rstripped Doc/build/html/library/idle.html. + It looks better than help.txt and will better document Idle as released. + The tkinter html viewer that works for this file was written by Mark Roseman. + The now unused EditorWindow.HelpDialog class and helt.txt file are deprecated. + +- Issue #24199: Deprecate unused idlelib.idlever with possible removal in 3.6. + +- Issue #24790: Remove extraneous code (which also create 2 & 3 conflicts). + +- Issue #23672: Allow Idle to edit and run files with astral chars in name. + Patch by Mohd Sanad Zaki Rizvi. + +- Issue 24745: Idle editor default font. Switch from Courier to + platform-sensitive TkFixedFont. This should not affect current customized + font selections. If there is a problem, edit $HOME/.idlerc/config-main.cfg + and remove 'fontxxx' entries from [Editor Window]. Patch by Mark Roseman. + +- Issue #21192: Idle editor. When a file is run, put its name in the restart bar. + Do not print false prompts. Original patch by Adnan Umer. + +- Issue #13884: Idle menus. Remove tearoff lines. Patch by Roger Serwy. + +- Issue #15809: IDLE shell now uses locale encoding instead of Latin1 for + decoding unicode literals. + + +What's New in IDLE 2.7.10? +========================= +*Release date: 2015-05-23* + +- Issue #23583: Fixed writing unicode to standard output stream in IDLE. + +- Issue #20577: Configuration of the max line length for the FormatParagraph + extension has been moved from the General tab of the Idle preferences dialog + to the FormatParagraph tab of the Config Extensions dialog. + Patch by Tal Einat. + +- Issue #16893: Update Idle doc chapter to match current Idle and add new + information. + +- Issue #23180: Rename IDLE "Windows" menu item to "Window". + Patch by Al Sweigart. + + What's New in IDLE 2.7.9? ========================= - -*Release data: 2014-12-07* (projected) +*Release date: 2014-12-10* - Issue #16893: Update Idle doc chapter to match current Idle and add new information. @@ -35,7 +167,6 @@ What's New in IDLE 2.7.8? ========================= - *Release date: 2014-06-29* - Issue #21940: Add unittest for WidgetRedirector. Initial patch by Saimadhav @@ -63,7 +194,6 @@ What's New in IDLE 2.7.7? ========================= - *Release date: 2014-05-31* - Issue #18104: Add idlelib/idle_test/htest.py with a few sample tests to begin @@ -101,7 +231,6 @@ What's New in IDLE 2.7.6? ========================= - *Release date: 2013-11-10* - Issue #19426: Fixed the opening of Python source file with specified encoding. @@ -149,7 +278,6 @@ What's New in IDLE 2.7.5? ========================= - *Release date: 2013-05-12* - Issue #17838: Allow sys.stdin to be reassigned. @@ -184,7 +312,6 @@ What's New in IDLE 2.7.4? ========================= - *Release date: 2013-04-06* - Issue #17625: In IDLE, close the replace dialog after it is used. @@ -255,7 +382,6 @@ What's New in IDLE 2.7.3? ========================= - *Release date: 2012-04-09* - Issue #964437 Make IDLE help window non-modal. @@ -288,7 +414,6 @@ What's New in IDLE 2.7.2? 
========================= - *Release date: 2011-06-11* - Issue #11718: IDLE's open module dialog couldn't find the __init__.py @@ -333,7 +458,6 @@ What's New in Python 2.7.1? =========================== - *Release date: 2010-11-27* - Issue #6378: idle.bat now runs with the appropriate Python version rather than @@ -342,7 +466,6 @@ What's New in IDLE 2.7? ======================= - *Release date: 2010-07-03* - Issue #5150: IDLE's format menu now has an option to strip trailing @@ -374,7 +497,6 @@ What's New in IDLE 2.6? ======================= - *Release date: 01-Oct-2008* - Issue #2665: On Windows, an IDLE installation upgraded from an old version @@ -388,11 +510,6 @@ - Autocompletion of filenames now support alternate separators, e.g. the '/' char on Windows. Patch 2061 Tal Einat. -What's New in IDLE 2.6a1? -========================= - -*Release date: 29-Feb-2008* - - Configured selection highlighting colors were ignored; updating highlighting in the config dialog would cause non-Python files to be colored as if they were Python source; improve use of ColorDelagator. Patch 1334. Tal Einat. @@ -464,15 +581,8 @@ What's New in IDLE 1.2? ======================= - *Release date: 19-SEP-2006* - -What's New in IDLE 1.2c1? -========================= - -*Release date: 17-AUG-2006* - - File menu hotkeys: there were three 'p' assignments. Reassign the 'Save Copy As' and 'Print' hotkeys to 'y' and 't'. Change the Shell hotkey from 's' to 'l'. @@ -493,11 +603,6 @@ - When used w/o subprocess, all exceptions were preceded by an error message claiming they were IDLE internal errors (since 1.2a1). -What's New in IDLE 1.2b3? -========================= - -*Release date: 03-AUG-2006* - - Bug #1525817: Don't truncate short lines in IDLE's tool tips. - Bug #1517990: IDLE keybindings on MacOS X now work correctly @@ -521,26 +626,6 @@ 'as' keyword in comment directly following import command. Closes 1325071. Patch 1479219 Tal Einat -What's New in IDLE 1.2b2? -========================= - -*Release date: 11-JUL-2006* - -What's New in IDLE 1.2b1? -========================= - -*Release date: 20-JUN-2006* - -What's New in IDLE 1.2a2? -========================= - -*Release date: 27-APR-2006* - -What's New in IDLE 1.2a1? -========================= - -*Release date: 05-APR-2006* - - Patch #1162825: Support non-ASCII characters in IDLE window titles. - Source file f.flush() after writing; trying to avoid lossage if user @@ -620,19 +705,14 @@ - The remote procedure call module rpc.py can now access data attributes of remote registered objects. Changes to these attributes are local, however. + What's New in IDLE 1.1? ======================= - *Release date: 30-NOV-2004* - On OpenBSD, terminating IDLE with ctrl-c from the command line caused a stuck subprocess MainThread because only the SocketThread was exiting. -What's New in IDLE 1.1b3/rc1? -============================= - -*Release date: 18-NOV-2004* - - Saving a Keyset w/o making changes (by using the "Save as New Custom Key Set" button) caused IDLE to fail on restart (no new keyset was created in config-keys.cfg). Also true for Theme/highlights. Python Bug 1064535. @@ -640,28 +720,12 @@ - A change to the linecache.py API caused IDLE to exit when an exception was raised while running without the subprocess (-n switch). Python Bug 1063840. -What's New in IDLE 1.1b2? 
-========================= - -*Release date: 03-NOV-2004* - - When paragraph reformat width was made configurable, a bug was introduced that caused reformatting of comment blocks to ignore how far the block was indented, effectively adding the indentation width to the reformat width. This has been repaired, and the reformat width is again a bound on the total width of reformatted lines. -What's New in IDLE 1.1b1? -========================= - -*Release date: 15-OCT-2004* - - -What's New in IDLE 1.1a3? -========================= - -*Release date: 02-SEP-2004* - - Improve keyboard focus binding, especially in Windows menu. Improve window raising, especially in the Windows menu and in the debugger. IDLEfork 763524. @@ -669,24 +733,12 @@ - If user passes a non-existent filename on the commandline, just open a new file, don't raise a dialog. IDLEfork 854928. - -What's New in IDLE 1.1a2? -========================= - -*Release date: 05-AUG-2004* - - EditorWindow.py was not finding the .chm help file on Windows. Typo at Rev 1.54. Python Bug 990954 - checking sys.platform for substring 'win' was breaking IDLE docs on Mac (darwin). Also, Mac Safari browser requires full file:// URIs. SF 900580. - -What's New in IDLE 1.1a1? -========================= - -*Release date: 08-JUL-2004* - - Redirect the warning stream to the shell during the ScriptBinding check of user code and format the warning similarly to an exception for both that check and for runtime warnings raised in the subprocess. @@ -749,26 +801,10 @@ What's New in IDLE 1.0? ======================= - *Release date: 29-Jul-2003* -- Added a banner to the shell discussing warnings possibly raised by personal - firewall software. Added same comment to README.txt. - - -What's New in IDLE 1.0 release candidate 2? -=========================================== - -*Release date: 24-Jul-2003* - - Calltip error when docstring was None Python Bug 775541 - -What's New in IDLE 1.0 release candidate 1? -=========================================== - -*Release date: 18-Jul-2003* - - Updated extend.txt, help.txt, and config-extensions.def to correctly reflect the current status of the configuration system. Python Bug 768469 @@ -784,12 +820,6 @@ sys.std{in|out|err}.encoding, for both the local and the subprocess case. SF IDLEfork patch 682347. - -What's New in IDLE 1.0b2? -========================= - -*Release date: 29-Jun-2003* - - Extend AboutDialog.ViewFile() to support file encodings. Make the CREDITS file Latin-1. @@ -828,7 +858,6 @@ What's New in IDLEfork 0.9b1? ============================= - *Release date: 02-Jun-2003* - The current working directory of the execution environment (and shell @@ -930,10 +959,8 @@ exception formatting to the subprocess. - What's New in IDLEfork 0.9 Alpha 2? =================================== - *Release date: 27-Jan-2003* - Updated INSTALL.txt to claify use of the python2 rpm. @@ -1037,7 +1064,6 @@ What's New in IDLEfork 0.9 Alpha 1? =================================== - *Release date: 31-Dec-2002* - First release of major new functionality. 
For further details refer to diff --git a/lib-python/2.7/idlelib/OutputWindow.py b/lib-python/2.7/idlelib/OutputWindow.py --- a/lib-python/2.7/idlelib/OutputWindow.py +++ b/lib-python/2.7/idlelib/OutputWindow.py @@ -96,7 +96,7 @@ "No special line", "The line you point at doesn't look like " "a valid file name followed by a line number.", - master=self.text) + parent=self.text) return filename, lineno = result edit = self.flist.open(filename) diff --git a/lib-python/2.7/idlelib/PathBrowser.py b/lib-python/2.7/idlelib/PathBrowser.py --- a/lib-python/2.7/idlelib/PathBrowser.py +++ b/lib-python/2.7/idlelib/PathBrowser.py @@ -17,6 +17,7 @@ self.init(flist) def settitle(self): + "Set window titles." self.top.wm_title("Path Browser") self.top.wm_iconname("Path Browser") @@ -70,7 +71,7 @@ def ispackagedir(self, file): if not os.path.isdir(file): - return 0 + return False init = os.path.join(file, "__init__.py") return os.path.exists(init) @@ -91,7 +92,7 @@ sorted.sort() return sorted -def _path_browser(parent): +def _path_browser(parent): # htest # flist = PyShellFileList(parent) PathBrowser(flist, _htest=True) parent.mainloop() diff --git a/lib-python/2.7/idlelib/PyShell.py b/lib-python/2.7/idlelib/PyShell.py --- a/lib-python/2.7/idlelib/PyShell.py +++ b/lib-python/2.7/idlelib/PyShell.py @@ -10,8 +10,6 @@ import socket import time import threading -import traceback -import types import io import linecache @@ -32,11 +30,11 @@ from idlelib.UndoDelegator import UndoDelegator from idlelib.OutputWindow import OutputWindow from idlelib.configHandler import idleConf -from idlelib import idlever from idlelib import rpc from idlelib import Debugger from idlelib import RemoteDebugger from idlelib import macosxSupport +from idlelib import IOBinding IDENTCHARS = string.ascii_letters + string.digits + "_" HOST = '127.0.0.1' # python execution server on localhost loopback @@ -160,7 +158,7 @@ # possible due to update in restore_file_breaks return if color: - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() cfg = idleConf.GetHighlight(theme, "break") else: cfg = {'foreground': '', 'background': ''} @@ -171,7 +169,7 @@ filename = self.io.filename text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1)) try: - i = self.breakpoints.index(lineno) + self.breakpoints.index(lineno) except ValueError: # only add if missing, i.e. 
do once self.breakpoints.append(lineno) try: # update the subprocess debugger @@ -345,7 +343,7 @@ def LoadTagDefs(self): ColorDelegator.LoadTagDefs(self) - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() self.tagdefs.update({ "stdin": {'background':None,'foreground':None}, "stdout": idleConf.GetHighlight(theme, "stdout"), @@ -439,7 +437,7 @@ try: self.rpcclt = MyRPCClient(addr) break - except socket.error as err: + except socket.error: pass else: self.display_port_binding_error() @@ -460,7 +458,7 @@ self.rpcclt.listening_sock.settimeout(10) try: self.rpcclt.accept() - except socket.timeout as err: + except socket.timeout: self.display_no_subprocess_error() return None self.rpcclt.register("console", self.tkconsole) @@ -474,7 +472,7 @@ self.poll_subprocess() return self.rpcclt - def restart_subprocess(self, with_cwd=False): + def restart_subprocess(self, with_cwd=False, filename=''): if self.restarting: return self.rpcclt self.restarting = True @@ -495,25 +493,24 @@ self.spawn_subprocess() try: self.rpcclt.accept() - except socket.timeout as err: + except socket.timeout: self.display_no_subprocess_error() return None self.transfer_path(with_cwd=with_cwd) console.stop_readline() # annotate restart in shell window and mark it console.text.delete("iomark", "end-1c") - if was_executing: - console.write('\n') - console.showprompt() - halfbar = ((int(console.width) - 16) // 2) * '=' - console.write(halfbar + ' RESTART ' + halfbar) + tag = 'RESTART: ' + (filename if filename else 'Shell') + halfbar = ((int(console.width) -len(tag) - 4) // 2) * '=' + console.write("\n{0} {1} {0}".format(halfbar, tag)) console.text.mark_set("restart", "end-1c") console.text.mark_gravity("restart", "left") - console.showprompt() + if not filename: + console.showprompt() # restart subprocess debugger if debug: # Restarted debugger connects to current instance of debug GUI - gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt) + RemoteDebugger.restart_subprocess_debugger(self.rpcclt) # reload remote debugger breakpoints for all PyShellEditWindows debug.load_breakpoints() self.compile.compiler.flags = self.original_compiler_flags @@ -634,7 +631,7 @@ item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid) from idlelib.TreeWidget import ScrolledCanvas, TreeNode top = Toplevel(self.tkconsole.root) - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() background = idleConf.GetHighlight(theme, 'normal')['background'] sc = ScrolledCanvas(top, bg=background, highlightthickness=0) sc.frame.pack(expand=1, fill="both") @@ -654,7 +651,7 @@ if source is None: source = open(filename, "r").read() try: - code = compile(source, filename, "exec") + code = compile(source, filename, "exec", dont_inherit=True) except (OverflowError, SyntaxError): self.tkconsole.resetoutput() print('*** Error in script or command!\n' @@ -671,10 +668,11 @@ self.more = 0 self.save_warnings_filters = warnings.filters[:] warnings.filterwarnings(action="error", category=SyntaxWarning) - if isinstance(source, types.UnicodeType): - from idlelib import IOBinding + if isinstance(source, unicode) and IOBinding.encoding != 'utf-8': try: - source = source.encode(IOBinding.encoding) + source = '# -*- coding: %s -*-\n%s' % ( + IOBinding.encoding, + source.encode(IOBinding.encoding)) except UnicodeError: self.tkconsole.resetoutput() self.write("Unsupported characters in input\n") @@ -801,7 +799,7 @@ "Exit?", "Do you want to exit altogether?", default="yes", - master=self.tkconsole.text): 
+ parent=self.tkconsole.text): raise else: self.showtraceback() @@ -839,7 +837,7 @@ "Run IDLE with the -n command line switch to start without a " "subprocess and refer to Help/IDLE Help 'Running without a " "subprocess' for further details.", - master=self.tkconsole.text) + parent=self.tkconsole.text) def display_no_subprocess_error(self): tkMessageBox.showerror( @@ -847,14 +845,14 @@ "IDLE's subprocess didn't make connection. Either IDLE can't " "start a subprocess or personal firewall software is blocking " "the connection.", - master=self.tkconsole.text) + parent=self.tkconsole.text) def display_executing_dialog(self): tkMessageBox.showerror( "Already executing", "The Python Shell window is already executing a command; " "please wait until it is finished.", - master=self.tkconsole.text) + parent=self.tkconsole.text) From pypy.commits at gmail.com Tue Dec 22 03:01:20 2015 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 22 Dec 2015 00:01:20 -0800 (PST) Subject: [pypy-commit] pypy default: fix translation when CLOCK_T is unsigned (BSDs) Message-ID: <56790350.913bc20a.d29ab.431d@mx.google.com> Author: Philip Jenvey Branch: Changeset: r81417:49495a30004c Date: 2015-12-21 15:52 -0800 http://bitbucket.org/pypy/pypy/changeset/49495a30004c/ Log: fix translation when CLOCK_T is unsigned (BSDs) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1302,7 +1302,7 @@ try: # note: times() can return a negative value (or even -1) # even if there is no error - result = widen(c_times(l_tmsbuf)) + result = rffi.cast(lltype.Signed, c_times(l_tmsbuf)) if result == -1: errno = get_saved_errno() if errno != 0: From pypy.commits at gmail.com Tue Dec 22 03:01:22 2015 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 22 Dec 2015 00:01:22 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.11: merge vendor/stdlib (2.7.11) Message-ID: <56790352.022f1c0a.fa884.246f@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r81418:935519a71e7c Date: 2015-12-21 23:43 -0800 http://bitbucket.org/pypy/pypy/changeset/935519a71e7c/ Log: merge vendor/stdlib (2.7.11) diff too long, truncating to 2000 out of 24624 lines diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -84,7 +84,7 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ - collapsed_path = _url_collapse_path(urllib.unquote(self.path)) + collapsed_path = _url_collapse_path(self.path) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: @@ -120,11 +120,7 @@ break # find an explicit query string, if present. - i = rest.rfind('?') - if i >= 0: - rest, query = rest[:i], rest[i+1:] - else: - query = '' + rest, _, query = rest.partition('?') # dissect the part after the directory name into a script name & # a possible additional path, to be stored in PATH_INFO. @@ -308,13 +304,15 @@ The utility of this function is limited to is_cgi method and helps preventing some security attacks. - Returns: A tuple of (head, tail) where tail is everything after the final / - and head is everything before it. Head will always start with a '/' and, - if it contains anything else, never have a trailing '/'. + Returns: The reconstituted URL, which will always start with a '/'. Raises: IndexError if too many '..' occur within the path. 
""" + # Query component should not be involved. + path, _, query = path.partition('?') + path = urllib.unquote(path) + # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. path_parts = path.split('/') @@ -335,6 +333,9 @@ else: tail_part = '' + if query: + tail_part = '?'.join((tail_part, query)) + splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = "/".join(splitpath) diff --git a/lib-python/2.7/UserDict.py b/lib-python/2.7/UserDict.py --- a/lib-python/2.7/UserDict.py +++ b/lib-python/2.7/UserDict.py @@ -1,7 +1,24 @@ """A more or less complete user-defined wrapper around dictionary objects.""" class UserDict: - def __init__(self, dict=None, **kwargs): + def __init__(*args, **kwargs): + if not args: + raise TypeError("descriptor '__init__' of 'UserDict' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + if args: + dict = args[0] + elif 'dict' in kwargs: + dict = kwargs.pop('dict') + import warnings + warnings.warn("Passing 'dict' as keyword argument is " + "deprecated", PendingDeprecationWarning, + stacklevel=2) + else: + dict = None self.data = {} if dict is not None: self.update(dict) @@ -43,7 +60,23 @@ def itervalues(self): return self.data.itervalues() def values(self): return self.data.values() def has_key(self, key): return key in self.data - def update(self, dict=None, **kwargs): + def update(*args, **kwargs): + if not args: + raise TypeError("descriptor 'update' of 'UserDict' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + if args: + dict = args[0] + elif 'dict' in kwargs: + dict = kwargs.pop('dict') + import warnings + warnings.warn("Passing 'dict' as keyword argument is deprecated", + PendingDeprecationWarning, stacklevel=2) + else: + dict = None if dict is None: pass elif isinstance(dict, UserDict): diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -453,6 +453,7 @@ for key in self._mapping: yield key +KeysView.register(type({}.viewkeys())) class ItemsView(MappingView, Set): @@ -473,6 +474,7 @@ for key in self._mapping: yield (key, self._mapping[key]) +ItemsView.register(type({}.viewitems())) class ValuesView(MappingView): @@ -486,6 +488,7 @@ for key in self._mapping: yield self._mapping[key] +ValuesView.register(type({}.viewvalues())) class MutableMapping(Mapping): diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -7,6 +7,7 @@ import os import abc import codecs +import sys import warnings import errno # Import thread instead of threading to reduce startup cost @@ -1497,6 +1498,11 @@ if not isinstance(encoding, basestring): raise ValueError("invalid encoding: %r" % encoding) + if sys.py3kwarning and not codecs.lookup(encoding)._is_text_encoding: + msg = ("%r is not a text encoding; " + "use codecs.open() to handle arbitrary codecs") + warnings.warnpy3k(msg % encoding, stacklevel=2) + if errors is None: errors = "strict" else: diff --git a/lib-python/2.7/base64.py b/lib-python/2.7/base64.py --- a/lib-python/2.7/base64.py +++ b/lib-python/2.7/base64.py @@ -7,6 +7,7 @@ import re import struct +import string import binascii @@ -52,7 +53,7 @@ # Strip off the trailing newline encoded = binascii.b2a_base64(s)[:-1] if 
altchars is not None: - return _translate(encoded, {'+': altchars[0], '/': altchars[1]}) + return encoded.translate(string.maketrans(b'+/', altchars[:2])) return encoded @@ -68,7 +69,7 @@ string. """ if altchars is not None: - s = _translate(s, {altchars[0]: '+', altchars[1]: '/'}) + s = s.translate(string.maketrans(altchars[:2], '+/')) try: return binascii.a2b_base64(s) except binascii.Error, msg: @@ -92,13 +93,16 @@ """ return b64decode(s) +_urlsafe_encode_translation = string.maketrans(b'+/', b'-_') +_urlsafe_decode_translation = string.maketrans(b'-_', b'+/') + def urlsafe_b64encode(s): """Encode a string using a url-safe Base64 alphabet. s is the string to encode. The encoded string is returned. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ - return b64encode(s, '-_') + return b64encode(s).translate(_urlsafe_encode_translation) def urlsafe_b64decode(s): """Decode a string encoded with the standard Base64 alphabet. @@ -109,7 +113,7 @@ The alphabet uses '-' instead of '+' and '_' instead of '/'. """ - return b64decode(s, '-_') + return b64decode(s.translate(_urlsafe_decode_translation)) @@ -200,7 +204,7 @@ # False, or the character to map the digit 1 (one) to. It should be # either L (el) or I (eye). if map01: - s = _translate(s, {'0': 'O', '1': map01}) + s = s.translate(string.maketrans(b'01', b'O' + map01)) if casefold: s = s.upper() # Strip off pad characters from the right. We need to count the pad diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -79,9 +79,19 @@ ### Codec base classes (defining the API) class CodecInfo(tuple): + """Codec details when looking up the codec registry""" + + # Private API to allow Python to blacklist the known non-Unicode + # codecs in the standard library. A more general mechanism to + # reliably distinguish test encodings from other codecs will hopefully + # be defined for Python 3.5 + # + # See http://bugs.python.org/issue19619 + _is_text_encoding = True # Assume codecs are text encodings by default def __new__(cls, encode, decode, streamreader=None, streamwriter=None, - incrementalencoder=None, incrementaldecoder=None, name=None): + incrementalencoder=None, incrementaldecoder=None, name=None, + _is_text_encoding=None): self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) self.name = name self.encode = encode @@ -90,6 +100,8 @@ self.incrementaldecoder = incrementaldecoder self.streamwriter = streamwriter self.streamreader = streamreader + if _is_text_encoding is not None: + self._is_text_encoding = _is_text_encoding return self def __repr__(self): @@ -126,8 +138,8 @@ 'strict' handling. The method may not store state in the Codec instance. Use - StreamCodec for codecs which have to keep state in order to - make encoding/decoding efficient. + StreamWriter for codecs which have to keep state in order to + make encoding efficient. The encoder must be able to handle zero length input and return an empty object of the output object type in this @@ -149,8 +161,8 @@ 'strict' handling. The method may not store state in the Codec instance. Use - StreamCodec for codecs which have to keep state in order to - make encoding/decoding efficient. + StreamReader for codecs which have to keep state in order to + make decoding efficient. 
The decoder must be able to handle zero length input and return an empty object of the output object type in this diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py --- a/lib-python/2.7/cookielib.py +++ b/lib-python/2.7/cookielib.py @@ -1434,7 +1434,7 @@ break # convert RFC 2965 Max-Age to seconds since epoch # XXX Strictly you're supposed to follow RFC 2616 - # age-calculation rules. Remember that zero Max-Age is a + # age-calculation rules. Remember that zero Max-Age # is a request to discard (old and new) cookie, though. k = "expires" v = self._now + v diff --git a/lib-python/2.7/ctypes/test/test_bitfields.py b/lib-python/2.7/ctypes/test/test_bitfields.py --- a/lib-python/2.7/ctypes/test/test_bitfields.py +++ b/lib-python/2.7/ctypes/test/test_bitfields.py @@ -264,5 +264,33 @@ x.a = 0xFEDCBA9876543211 self.assertEqual(x.a, 0xFEDCBA9876543211) + @need_symbol('c_uint32') + def test_uint32_swap_little_endian(self): + # Issue #23319 + class Little(LittleEndianStructure): + _fields_ = [("a", c_uint32, 24), + ("b", c_uint32, 4), + ("c", c_uint32, 4)] + b = bytearray(4) + x = Little.from_buffer(b) + x.a = 0xabcdef + x.b = 1 + x.c = 2 + self.assertEqual(b, b'\xef\xcd\xab\x21') + + @need_symbol('c_uint32') + def test_uint32_swap_big_endian(self): + # Issue #23319 + class Big(BigEndianStructure): + _fields_ = [("a", c_uint32, 24), + ("b", c_uint32, 4), + ("c", c_uint32, 4)] + b = bytearray(4) + x = Big.from_buffer(b) + x.a = 0xabcdef + x.b = 1 + x.c = 2 + self.assertEqual(b, b'\xab\xcd\xef\x12') + if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_pointers.py b/lib-python/2.7/ctypes/test/test_pointers.py --- a/lib-python/2.7/ctypes/test/test_pointers.py +++ b/lib-python/2.7/ctypes/test/test_pointers.py @@ -192,9 +192,19 @@ LargeNamedType = type('T' * 2 ** 25, (Structure,), {}) self.assertTrue(POINTER(LargeNamedType)) + # to not leak references, we must clean _pointer_type_cache + from ctypes import _pointer_type_cache + del _pointer_type_cache[LargeNamedType] + def test_pointer_type_str_name(self): large_string = 'T' * 2 ** 25 - self.assertTrue(POINTER(large_string)) + P = POINTER(large_string) + self.assertTrue(P) + + # to not leak references, we must clean _pointer_type_cache + from ctypes import _pointer_type_cache + del _pointer_type_cache[id(P)] + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_random_things.py b/lib-python/2.7/ctypes/test/test_random_things.py --- a/lib-python/2.7/ctypes/test/test_random_things.py +++ b/lib-python/2.7/ctypes/test/test_random_things.py @@ -30,7 +30,7 @@ # value is printed correctly. # # Changed in 0.9.3: No longer is '(in callback)' prepended to the - # error message - instead a additional frame for the C code is + # error message - instead an additional frame for the C code is # created, then a full traceback printed. When SystemExit is # raised in a callback function, the interpreter exits. 
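
Aside on the test_bitfields.py hunk above: the two new test_uint32_swap_* cases are regression tests for issue #23319, which fixed bitfield packing in byte-swapped (non-native endian) structures. A minimal standalone sketch of the packing those tests assert follows; it assumes a ctypes build that already carries the #23319 fix pulled in by this stdlib update, and it mirrors the 24/4/4-bit layout used in the tests rather than adding anything new.

# Minimal sketch of the layout checked by test_uint32_swap_little_endian /
# test_uint32_swap_big_endian above (assumes a ctypes with the #23319 fix).
from ctypes import LittleEndianStructure, BigEndianStructure, c_uint32

class Little(LittleEndianStructure):
    _fields_ = [("a", c_uint32, 24), ("b", c_uint32, 4), ("c", c_uint32, 4)]

class Big(BigEndianStructure):
    _fields_ = [("a", c_uint32, 24), ("b", c_uint32, 4), ("c", c_uint32, 4)]

buf = bytearray(4)
little = Little.from_buffer(buf)          # shares storage with buf, no copy
little.a, little.b, little.c = 0xabcdef, 1, 2
assert buf == bytearray(b'\xef\xcd\xab\x21')    # 0x21abcdef stored LSB-first

buf2 = bytearray(4)
big = Big.from_buffer(buf2)
big.a, big.b, big.c = 0xabcdef, 1, 2
assert buf2 == bytearray(b'\xab\xcd\xef\x12')   # 0xabcdef12 stored MSB-first
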
diff --git a/lib-python/2.7/ctypes/test/test_win32.py b/lib-python/2.7/ctypes/test/test_win32.py --- a/lib-python/2.7/ctypes/test/test_win32.py +++ b/lib-python/2.7/ctypes/test/test_win32.py @@ -114,5 +114,9 @@ self.assertEqual(ret.top, top.value) self.assertEqual(ret.bottom, bottom.value) + # to not leak references, we must clean _pointer_type_cache + from ctypes import _pointer_type_cache + del _pointer_type_cache[RECT] + if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/distutils/__init__.py b/lib-python/2.7/distutils/__init__.py --- a/lib-python/2.7/distutils/__init__.py +++ b/lib-python/2.7/distutils/__init__.py @@ -8,12 +8,6 @@ setup (...) """ -__revision__ = "$Id$" +import sys -# Distutils version -# -# Updated automatically by the Python release process. -# -#--start constants-- -__version__ = "2.7.10" -#--end constants-- +__version__ = sys.version[:sys.version.index(' ')] diff --git a/lib-python/2.7/distutils/ccompiler.py b/lib-python/2.7/distutils/ccompiler.py --- a/lib-python/2.7/distutils/ccompiler.py +++ b/lib-python/2.7/distutils/ccompiler.py @@ -718,7 +718,7 @@ raise NotImplementedError def library_option(self, lib): - """Return the compiler option to add 'dir' to the list of libraries + """Return the compiler option to add 'lib' to the list of libraries linked into the shared library or executable. """ raise NotImplementedError diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -209,10 +209,12 @@ else: # win-amd64 or win-ia64 suffix = self.plat_name[4:] - new_lib = os.path.join(sys.exec_prefix, 'PCbuild') - if suffix: - new_lib = os.path.join(new_lib, suffix) - self.library_dirs.append(new_lib) + # We could have been built in one of two places; add both + for d in ('PCbuild',), ('PC', 'VS9.0'): + new_lib = os.path.join(sys.exec_prefix, *d) + if suffix: + new_lib = os.path.join(new_lib, suffix) + self.library_dirs.append(new_lib) elif MSVC_VERSION == 8: self.library_dirs.append(os.path.join(sys.exec_prefix, diff --git a/lib-python/2.7/distutils/msvc9compiler.py b/lib-python/2.7/distutils/msvc9compiler.py --- a/lib-python/2.7/distutils/msvc9compiler.py +++ b/lib-python/2.7/distutils/msvc9compiler.py @@ -426,7 +426,7 @@ self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO'] if self.__version >= 7: self.ldflags_shared_debug = [ - '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None' + '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG' ] self.ldflags_static = [ '/nologo'] diff --git a/lib-python/2.7/distutils/tests/test_core.py b/lib-python/2.7/distutils/tests/test_core.py --- a/lib-python/2.7/distutils/tests/test_core.py +++ b/lib-python/2.7/distutils/tests/test_core.py @@ -9,6 +9,7 @@ from test.test_support import captured_stdout, run_unittest import unittest from distutils.tests import support +from distutils import log # setup script that uses __file__ setup_using___file__ = """\ @@ -36,6 +37,7 @@ self.old_stdout = sys.stdout self.cleanup_testfn() self.old_argv = sys.argv, sys.argv[:] + self.addCleanup(log.set_threshold, log._global_log.threshold) def tearDown(self): sys.stdout = self.old_stdout diff --git a/lib-python/2.7/distutils/tests/test_dist.py b/lib-python/2.7/distutils/tests/test_dist.py --- a/lib-python/2.7/distutils/tests/test_dist.py +++ b/lib-python/2.7/distutils/tests/test_dist.py @@ -13,6 +13,7 @@ import distutils.dist from test.test_support import TESTFN, 
captured_stdout, run_unittest, unlink from distutils.tests import support +from distutils import log class test_dist(Command): @@ -397,6 +398,7 @@ def test_show_help(self): # smoke test, just makes sure some help is displayed + self.addCleanup(log.set_threshold, log._global_log.threshold) dist = Distribution() sys.argv = [] dist.help = 1 diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py --- a/lib-python/2.7/email/test/test_email.py +++ b/lib-python/2.7/email/test/test_email.py @@ -12,6 +12,10 @@ import textwrap from cStringIO import StringIO from random import choice +try: + from threading import Thread +except ImportError: + from dummy_threading import Thread import email @@ -33,7 +37,7 @@ from email import base64MIME from email import quopriMIME -from test.test_support import findfile, run_unittest +from test.test_support import findfile, run_unittest, start_threads from email.test import __file__ as landmark @@ -2412,6 +2416,25 @@ addrs = Utils.getaddresses(['User ((nested comment)) ']) eq(addrs[0][1], 'foo at bar.com') + def test_make_msgid_collisions(self): + # Test make_msgid uniqueness, even with multiple threads + class MsgidsThread(Thread): + def run(self): + # generate msgids for 3 seconds + self.msgids = [] + append = self.msgids.append + make_msgid = Utils.make_msgid + clock = time.time + tfin = clock() + 3.0 + while clock() < tfin: + append(make_msgid()) + + threads = [MsgidsThread() for i in range(5)] + with start_threads(threads): + pass + all_ids = sum([t.msgids for t in threads], []) + self.assertEqual(len(set(all_ids)), len(all_ids)) + def test_utils_quote_unquote(self): eq = self.assertEqual msg = Message() diff --git a/lib-python/2.7/email/utils.py b/lib-python/2.7/email/utils.py --- a/lib-python/2.7/email/utils.py +++ b/lib-python/2.7/email/utils.py @@ -177,21 +177,20 @@ def make_msgid(idstring=None): """Returns a string suitable for RFC 2822 compliant Message-ID, e.g: - <20020201195627.33539.96671 at nightshade.la.mastaler.com> + <142480216486.20800.16526388040877946887 at nightshade.la.mastaler.com> Optional idstring if given is a string used to strengthen the uniqueness of the message id. """ - timeval = time.time() - utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval)) + timeval = int(time.time()*100) pid = os.getpid() - randint = random.randrange(100000) + randint = random.getrandbits(64) if idstring is None: idstring = '' else: idstring = '.' 
+ idstring idhost = socket.getfqdn() - msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost) + msgid = '<%d.%d.%d%s@%s>' % (timeval, pid, randint, idstring, idhost) return msgid diff --git a/lib-python/2.7/encodings/base64_codec.py b/lib-python/2.7/encodings/base64_codec.py --- a/lib-python/2.7/encodings/base64_codec.py +++ b/lib-python/2.7/encodings/base64_codec.py @@ -76,4 +76,5 @@ incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/encodings/bz2_codec.py b/lib-python/2.7/encodings/bz2_codec.py --- a/lib-python/2.7/encodings/bz2_codec.py +++ b/lib-python/2.7/encodings/bz2_codec.py @@ -99,4 +99,5 @@ incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/encodings/hex_codec.py b/lib-python/2.7/encodings/hex_codec.py --- a/lib-python/2.7/encodings/hex_codec.py +++ b/lib-python/2.7/encodings/hex_codec.py @@ -76,4 +76,5 @@ incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/encodings/quopri_codec.py b/lib-python/2.7/encodings/quopri_codec.py --- a/lib-python/2.7/encodings/quopri_codec.py +++ b/lib-python/2.7/encodings/quopri_codec.py @@ -21,7 +21,7 @@ # using str() because of cStringIO's Unicode undesired Unicode behavior. f = StringIO(str(input)) g = StringIO() - quopri.encode(f, g, 1) + quopri.encode(f, g, quotetabs=True) output = g.getvalue() return (output, len(input)) @@ -72,4 +72,5 @@ incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/encodings/rot_13.py b/lib-python/2.7/encodings/rot_13.py --- a/lib-python/2.7/encodings/rot_13.py +++ b/lib-python/2.7/encodings/rot_13.py @@ -44,6 +44,7 @@ incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, + _is_text_encoding=False, ) ### Decoding Map diff --git a/lib-python/2.7/encodings/uu_codec.py b/lib-python/2.7/encodings/uu_codec.py --- a/lib-python/2.7/encodings/uu_codec.py +++ b/lib-python/2.7/encodings/uu_codec.py @@ -126,4 +126,5 @@ incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/encodings/zlib_codec.py b/lib-python/2.7/encodings/zlib_codec.py --- a/lib-python/2.7/encodings/zlib_codec.py +++ b/lib-python/2.7/encodings/zlib_codec.py @@ -99,4 +99,5 @@ incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, + _is_text_encoding=False, ) diff --git a/lib-python/2.7/ensurepip/__init__.py b/lib-python/2.7/ensurepip/__init__.py --- a/lib-python/2.7/ensurepip/__init__.py +++ b/lib-python/2.7/ensurepip/__init__.py @@ -12,9 +12,9 @@ __all__ = ["version", "bootstrap"] -_SETUPTOOLS_VERSION = "15.2" +_SETUPTOOLS_VERSION = "18.2" -_PIP_VERSION = "6.1.1" +_PIP_VERSION = "7.1.2" # pip currently requires ssl support, so we try to provide a nicer # error message when that is missing (http://bugs.python.org/issue19744) @@ -147,7 +147,7 @@ _disable_pip_configuration_settings() # Construct the arguments to be passed to the pip command - args = ["uninstall", "-y"] + args = ["uninstall", "-y", "--disable-pip-version-check"] if verbosity: args += ["-" + "v" * verbosity] diff --git a/lib-python/2.7/ensurepip/_bundled/pip-6.1.1-py2.py3-none-any.whl 
b/lib-python/2.7/ensurepip/_bundled/pip-6.1.1-py2.py3-none-any.whl deleted file mode 100644 index e59694a019051d58b9a378a1adfc9461b8cec9c3..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/pip-7.1.2-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/pip-7.1.2-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..5e490155f0ca7f4ddb64c93c39fb2efb8795cd08 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-15.2-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-15.2-py2.py3-none-any.whl deleted file mode 100644 index f153ed376684275e08fcfebdb2de8352fb074171..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 GIT binary patch [cut] diff --git a/lib-python/2.7/ensurepip/_bundled/setuptools-18.2-py2.py3-none-any.whl b/lib-python/2.7/ensurepip/_bundled/setuptools-18.2-py2.py3-none-any.whl new file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..f4288d68e074466894d8a2342e113737df7b7649 GIT binary patch [cut] diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py --- a/lib-python/2.7/httplib.py +++ b/lib-python/2.7/httplib.py @@ -772,8 +772,7 @@ if self.sock: raise RuntimeError("Can't setup tunnel for established connection.") - self._tunnel_host = host - self._tunnel_port = port + self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: @@ -802,8 +801,8 @@ self.debuglevel = level def _tunnel(self): - (host, port) = self._get_hostport(self._tunnel_host, self._tunnel_port) - self.send("CONNECT %s:%d HTTP/1.0\r\n" % (host, port)) + self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host, + self._tunnel_port)) for header, value in self._tunnel_headers.iteritems(): self.send("%s: %s\r\n" % (header, value)) self.send("\r\n") @@ -811,6 +810,11 @@ method = self._method) (version, code, message) = response._read_status() + if version == "HTTP/0.9": + # HTTP/0.9 doesn't support the CONNECT verb, so if httplib has + # concluded HTTP/0.9 is being used something has gone wrong. 
+ self.close() + raise socket.error("Invalid response from tunnel request") if code != 200: self.close() raise socket.error("Tunnel connection failed: %d %s" % (code, @@ -1063,7 +1067,7 @@ elif body is not None: try: thelen = str(len(body)) - except TypeError: + except (TypeError, AttributeError): # If this is a file-like object, try to # fstat its file descriptor try: diff --git a/lib-python/2.7/idlelib/AutoCompleteWindow.py b/lib-python/2.7/idlelib/AutoCompleteWindow.py --- a/lib-python/2.7/idlelib/AutoCompleteWindow.py +++ b/lib-python/2.7/idlelib/AutoCompleteWindow.py @@ -192,6 +192,7 @@ scrollbar.config(command=listbox.yview) scrollbar.pack(side=RIGHT, fill=Y) listbox.pack(side=LEFT, fill=BOTH, expand=True) + acw.lift() # work around bug in Tk 8.5.18+ (issue #24570) # Initialize the listbox selection self.listbox.select_set(self._binary_search(self.start)) diff --git a/lib-python/2.7/idlelib/Bindings.py b/lib-python/2.7/idlelib/Bindings.py --- a/lib-python/2.7/idlelib/Bindings.py +++ b/lib-python/2.7/idlelib/Bindings.py @@ -76,7 +76,6 @@ ]), ('options', [ ('Configure _IDLE', '<>'), - ('Configure _Extensions', '<>'), None, ]), ('help', [ diff --git a/lib-python/2.7/idlelib/CallTipWindow.py b/lib-python/2.7/idlelib/CallTipWindow.py --- a/lib-python/2.7/idlelib/CallTipWindow.py +++ b/lib-python/2.7/idlelib/CallTipWindow.py @@ -72,6 +72,7 @@ background="#ffffe0", relief=SOLID, borderwidth=1, font = self.widget['font']) self.label.pack() + tw.lift() # work around bug in Tk 8.5.18+ (issue #24570) self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhide_event) diff --git a/lib-python/2.7/idlelib/ClassBrowser.py b/lib-python/2.7/idlelib/ClassBrowser.py --- a/lib-python/2.7/idlelib/ClassBrowser.py +++ b/lib-python/2.7/idlelib/ClassBrowser.py @@ -56,7 +56,7 @@ self.settitle() top.focus_set() # create scrolled canvas - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() background = idleConf.GetHighlight(theme, 'normal')['background'] sc = ScrolledCanvas(top, bg=background, highlightthickness=0, takefocus=1) sc.frame.pack(expand=1, fill="both") diff --git a/lib-python/2.7/idlelib/ColorDelegator.py b/lib-python/2.7/idlelib/ColorDelegator.py --- a/lib-python/2.7/idlelib/ColorDelegator.py +++ b/lib-python/2.7/idlelib/ColorDelegator.py @@ -62,7 +62,7 @@ self.tag_raise('sel') def LoadTagDefs(self): - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() self.tagdefs = { "COMMENT": idleConf.GetHighlight(theme, "comment"), "KEYWORD": idleConf.GetHighlight(theme, "keyword"), diff --git a/lib-python/2.7/idlelib/Debugger.py b/lib-python/2.7/idlelib/Debugger.py --- a/lib-python/2.7/idlelib/Debugger.py +++ b/lib-python/2.7/idlelib/Debugger.py @@ -17,7 +17,10 @@ self.set_step() return message = self.__frame2message(frame) - self.gui.interaction(message, frame) + try: + self.gui.interaction(message, frame) + except TclError: # When closing debugger window with [x] in 3.x + pass def user_exception(self, frame, info): if self.in_rpc_code(frame): @@ -59,8 +62,42 @@ self.frame = None self.make_gui() self.interacting = 0 + self.nesting_level = 0 def run(self, *args): + # Deal with the scenario where we've already got a program running + # in the debugger and we want to start another. If that is the case, + # our second 'run' was invoked from an event dispatched not from + # the main event loop, but from the nested event loop in 'interaction' + # below. 
So our stack looks something like this: + # outer main event loop + # run() + # + # callback to debugger's interaction() + # nested event loop + # run() for second command + # + # This kind of nesting of event loops causes all kinds of problems + # (see e.g. issue #24455) especially when dealing with running as a + # subprocess, where there's all kinds of extra stuff happening in + # there - insert a traceback.print_stack() to check it out. + # + # By this point, we've already called restart_subprocess() in + # ScriptBinding. However, we also need to unwind the stack back to + # that outer event loop. To accomplish this, we: + # - return immediately from the nested run() + # - abort_loop ensures the nested event loop will terminate + # - the debugger's interaction routine completes normally + # - the restart_subprocess() will have taken care of stopping + # the running program, which will also let the outer run complete + # + # That leaves us back at the outer main event loop, at which point our + # after event can fire, and we'll come back to this routine with a + # clean stack. + if self.nesting_level > 0: + self.abort_loop() + self.root.after(100, lambda: self.run(*args)) + return try: self.interacting = 1 return self.idb.run(*args) @@ -68,6 +105,10 @@ self.interacting = 0 def close(self, event=None): + try: + self.quit() + except Exception: + pass if self.interacting: self.top.bell() return @@ -191,7 +232,12 @@ b.configure(state="normal") # self.top.wakeup() - self.root.mainloop() + # Nested main loop: Tkinter's main loop is not reentrant, so use + # Tcl's vwait facility, which reenters the event loop until an + # event handler sets the variable we're waiting on + self.nesting_level += 1 + self.root.tk.call('vwait', '::idledebugwait') + self.nesting_level -= 1 # for b in self.buttons: b.configure(state="disabled") @@ -215,23 +261,26 @@ def cont(self): self.idb.set_continue() - self.root.quit() + self.abort_loop() def step(self): self.idb.set_step() - self.root.quit() + self.abort_loop() def next(self): self.idb.set_next(self.frame) - self.root.quit() + self.abort_loop() def ret(self): self.idb.set_return(self.frame) - self.root.quit() + self.abort_loop() def quit(self): self.idb.set_quit() - self.root.quit() + self.abort_loop() + + def abort_loop(self): + self.root.tk.call('set', '::idledebugwait', '1') stackviewer = None diff --git a/lib-python/2.7/idlelib/EditorWindow.py b/lib-python/2.7/idlelib/EditorWindow.py --- a/lib-python/2.7/idlelib/EditorWindow.py +++ b/lib-python/2.7/idlelib/EditorWindow.py @@ -9,7 +9,6 @@ import webbrowser from idlelib.MultiCall import MultiCallCreator -from idlelib import idlever from idlelib import WindowList from idlelib import SearchDialog from idlelib import GrepDialog @@ -18,6 +17,7 @@ from idlelib.configHandler import idleConf from idlelib import aboutDialog, textView, configDialog from idlelib import macosxSupport +from idlelib import help # The default tab setting for a Text widget, in average-width characters. TK_TABWIDTH_DEFAULT = 8 @@ -83,6 +83,11 @@ near - a Toplevel widget (e.g. 
EditorWindow or PyShell) to use as a reference for placing the help window """ + import warnings as w + w.warn("EditorWindow.HelpDialog is no longer used by Idle.\n" + "It will be removed in 3.6 or later.\n" + "It has been replaced by private help.HelpWindow\n", + DeprecationWarning, stacklevel=2) if self.dlg is None: self.show_dialog(parent) if near: @@ -109,9 +114,7 @@ self.dlg = None self.parent = None -helpDialog = HelpDialog() # singleton instance -def _help_dialog(parent): # wrapper for htest - helpDialog.show_dialog(parent) +helpDialog = HelpDialog() # singleton instance, no longer used class EditorWindow(object): @@ -154,7 +157,6 @@ EditorWindow.help_url = 'file://' + EditorWindow.help_url else: EditorWindow.help_url = "https://docs.python.org/%d.%d/" % sys.version_info[:2] - currentTheme=idleConf.CurrentTheme() self.flist = flist root = root or flist.root self.root = root @@ -182,6 +184,7 @@ 'name': 'text', 'padx': 5, 'wrap': 'none', + 'highlightthickness': 0, 'width': self.width, 'height': idleConf.GetOption('main', 'EditorWindow', 'height', type='int')} if TkVersion >= 8.5: @@ -200,13 +203,13 @@ if macosxSupport.isAquaTk(): # Command-W on editorwindows doesn't work without this. text.bind('<>', self.close_event) - # Some OS X systems have only one mouse button, - # so use control-click for pulldown menus there. - # (Note, AquaTk defines <2> as the right button if - # present and the Tk Text widget already binds <2>.) + # Some OS X systems have only one mouse button, so use + # control-click for popup context menus there. For two + # buttons, AquaTk defines <2> as the right button, not <3>. text.bind("",self.right_menu_event) + text.bind("<2>", self.right_menu_event) else: - # Elsewhere, use right-click for pulldown menus. + # Elsewhere, use right-click for popup menus. text.bind("<3>",self.right_menu_event) text.bind("<>", self.cut) text.bind("<>", self.copy) @@ -216,8 +219,6 @@ text.bind("<>", self.python_docs) text.bind("<>", self.about_dialog) text.bind("<>", self.config_dialog) - text.bind("<>", - self.config_extensions_dialog) text.bind("<>", self.open_module) text.bind("<>", lambda event: "break") text.bind("<>", self.select_all) @@ -258,13 +259,7 @@ vbar['command'] = text.yview vbar.pack(side=RIGHT, fill=Y) text['yscrollcommand'] = vbar.set - fontWeight = 'normal' - if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'): - fontWeight='bold' - text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'), - idleConf.GetOption('main', 'EditorWindow', - 'font-size', type='int'), - fontWeight)) + text['font'] = idleConf.GetFont(self.root, 'main', 'EditorWindow') text_frame.pack(side=LEFT, fill=BOTH, expand=1) text.pack(side=TOP, fill=BOTH, expand=1) text.focus_set() @@ -318,7 +313,7 @@ io.set_filename_change_hook(self.filename_change_hook) # Create the recent files submenu - self.recent_files_menu = Menu(self.menubar) + self.recent_files_menu = Menu(self.menubar, tearoff=0) self.menudict['file'].insert_cascade(3, label='Recent Files', underline=0, menu=self.recent_files_menu) @@ -353,36 +348,6 @@ self.askinteger = tkSimpleDialog.askinteger self.showerror = tkMessageBox.showerror - self._highlight_workaround() # Fix selection tags on Windows - - def _highlight_workaround(self): - # On Windows, Tk removes painting of the selection - # tags which is different behavior than on Linux and Mac. - # See issue14146 for more information. 
- if not sys.platform.startswith('win'): - return - - text = self.text - text.event_add("<>", "") - text.event_add("<>", "") - def highlight_fix(focus): - sel_range = text.tag_ranges("sel") - if sel_range: - if focus == 'out': - HILITE_CONFIG = idleConf.GetHighlight( - idleConf.CurrentTheme(), 'hilite') - text.tag_config("sel_fix", HILITE_CONFIG) - text.tag_raise("sel_fix") - text.tag_add("sel_fix", *sel_range) - elif focus == 'in': - text.tag_remove("sel_fix", "1.0", "end") - - text.bind("<>", - lambda ev: highlight_fix("out")) - text.bind("<>", - lambda ev: highlight_fix("in")) - - def _filename_to_unicode(self, filename): """convert filename to unicode in order to display it in Tk""" if isinstance(filename, unicode) or not filename: @@ -446,6 +411,7 @@ def set_status_bar(self): self.status_bar = self.MultiStatusBar(self.top) + sep = Frame(self.top, height=1, borderwidth=1, background='grey75') if sys.platform == "darwin": # Insert some padding to avoid obscuring some of the statusbar # by the resize widget. @@ -453,6 +419,7 @@ self.status_bar.set_label('column', 'Col: ?', side=RIGHT) self.status_bar.set_label('line', 'Ln: ?', side=RIGHT) self.status_bar.pack(side=BOTTOM, fill=X) + sep.pack(side=BOTTOM, fill=X) self.text.bind("<>", self.set_line_and_column) self.text.event_add("<>", "", "") @@ -479,12 +446,13 @@ self.menudict = menudict = {} for name, label in self.menu_specs: underline, label = prepstr(label) - menudict[name] = menu = Menu(mbar, name=name) + menudict[name] = menu = Menu(mbar, name=name, tearoff=0) mbar.add_cascade(label=label, menu=menu, underline=underline) if macosxSupport.isCarbonTk(): # Insert the application menu - menudict['application'] = menu = Menu(mbar, name='apple') + menudict['application'] = menu = Menu(mbar, name='apple', + tearoff=0) mbar.add_cascade(label='IDLE', menu=menu) self.fill_menus() @@ -565,19 +533,23 @@ return 'normal' def about_dialog(self, event=None): + "Handle Help 'About IDLE' event." + # Synchronize with macosxSupport.overrideRootMenu.about_dialog. aboutDialog.AboutDialog(self.top,'About IDLE') def config_dialog(self, event=None): + "Handle Options 'Configure IDLE' event." + # Synchronize with macosxSupport.overrideRootMenu.config_dialog. configDialog.ConfigDialog(self.top,'Settings') - def config_extensions_dialog(self, event=None): - configDialog.ConfigExtensionsDialog(self.top) def help_dialog(self, event=None): + "Handle Help 'IDLE Help' event." + # Synchronize with macosxSupport.overrideRootMenu.help_dialog. 
if self.root: parent = self.root else: parent = self.top - helpDialog.display(parent, near=self.top) + help.show_idlehelp(parent) def python_docs(self, event=None): if sys.platform[:3] == 'win': @@ -785,7 +757,7 @@ # Called from self.filename_change_hook and from configDialog.py self._rmcolorizer() self._addcolorizer() - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() normal_colors = idleConf.GetHighlight(theme, 'normal') cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg') select_colors = idleConf.GetHighlight(theme, 'hilite') @@ -796,17 +768,15 @@ selectforeground=select_colors['foreground'], selectbackground=select_colors['background'], ) + if TkVersion >= 8.5: + self.text.config( + inactiveselectbackground=select_colors['background']) def ResetFont(self): "Update the text widgets' font if it is changed" # Called from configDialog.py - fontWeight='normal' - if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'): - fontWeight='bold' - self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'), - idleConf.GetOption('main','EditorWindow','font-size', - type='int'), - fontWeight)) + + self.text['font'] = idleConf.GetFont(self.root, 'main','EditorWindow') def RemoveKeybindings(self): "Remove the keybindings before they are changed." @@ -920,8 +890,10 @@ except IOError as err: if not getattr(self.root, "recentfilelist_error_displayed", False): self.root.recentfilelist_error_displayed = True - tkMessageBox.showerror(title='IDLE Error', - message='Unable to update Recent Files list:\n%s' + tkMessageBox.showwarning(title='IDLE Warning', + message="Cannot update File menu Recent Files list. " + "Your operating system says:\n%s\n" + "Select OK and IDLE will continue without updating." % str(err), parent=self.text) # for each edit window instance, construct the recent files menu @@ -1729,4 +1701,4 @@ if __name__ == '__main__': from idlelib.idle_test.htest import run - run(_help_dialog, _editor_window) + run(_editor_window) diff --git a/lib-python/2.7/idlelib/GrepDialog.py b/lib-python/2.7/idlelib/GrepDialog.py --- a/lib-python/2.7/idlelib/GrepDialog.py +++ b/lib-python/2.7/idlelib/GrepDialog.py @@ -1,3 +1,4 @@ +from __future__ import print_function import os import fnmatch import re # for htest @@ -5,7 +6,6 @@ from Tkinter import StringVar, BooleanVar, Checkbutton # for GrepDialog from Tkinter import Tk, Text, Button, SEL, END # for htest from idlelib import SearchEngine -import itertools from idlelib.SearchDialogBase import SearchDialogBase # Importing OutputWindow fails due to import loop # EditorWindow -> GrepDialop -> OutputWindow -> EditorWindow diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py --- a/lib-python/2.7/idlelib/IOBinding.py +++ b/lib-python/2.7/idlelib/IOBinding.py @@ -5,22 +5,18 @@ # end-of-line conventions, instead of relying on the standard library, # which will only understand the local convention. 
+import codecs +from codecs import BOM_UTF8 import os -import types import pipes +import re import sys -import codecs import tempfile + import tkFileDialog import tkMessageBox -import re -from Tkinter import * from SimpleDialog import SimpleDialog -from idlelib.configHandler import idleConf - -from codecs import BOM_UTF8 - # Try setting the locale, so that we can find out # what encoding to use try: @@ -251,7 +247,7 @@ with open(filename, 'rb') as f: chars = f.read() except IOError as msg: - tkMessageBox.showerror("I/O Error", str(msg), master=self.text) + tkMessageBox.showerror("I/O Error", str(msg), parent=self.text) return False chars = self.decode(chars) @@ -298,7 +294,7 @@ title="Error loading the file", message="The encoding '%s' is not known to this Python "\ "installation. The file may not display correctly" % name, - master = self.text) + parent = self.text) enc = None if enc: try: @@ -328,7 +324,7 @@ title="Save On Close", message=message, default=tkMessageBox.YES, - master=self.text) + parent=self.text) if confirm: reply = "yes" self.save(None) @@ -387,11 +383,11 @@ return True except IOError as msg: tkMessageBox.showerror("I/O Error", str(msg), - master=self.text) + parent=self.text) return False def encode(self, chars): - if isinstance(chars, types.StringType): + if isinstance(chars, str): # This is either plain ASCII, or Tk was returning mixed-encoding # text to us. Don't try to guess further. return chars @@ -417,7 +413,7 @@ tkMessageBox.showerror( "I/O Error", "%s. Saving as UTF-8" % failed, - master = self.text) + parent = self.text) # If there was a UTF-8 signature, use that. This should not fail if self.fileencoding == BOM_UTF8 or failed: return BOM_UTF8 + chars.encode("utf-8") @@ -430,7 +426,7 @@ "I/O Error", "Cannot save this as '%s' anymore. Saving as UTF-8" \ % self.fileencoding, - master = self.text) + parent = self.text) return BOM_UTF8 + chars.encode("utf-8") # Nothing was declared, and we had not determined an encoding # on loading. Recommend an encoding line. 
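
The IOBinding.py hunks here are mostly mechanical (import cleanup and the master= to parent= dialog keyword), but they happen to quote IDLE's save-time encoding fallback, which is easy to misread in diff form: when the declared or previously detected file encoding cannot represent the text, IDLE writes UTF-8 prefixed with a BOM instead. A condensed sketch of that behaviour follows; encode_for_save is an illustrative stand-in, not IDLE's actual method, and it deliberately drops the error-dialog and coding-cookie handling shown in the hunk.

# Condensed sketch of the fallback quoted in the hunk above (Python 2 semantics;
# encode_for_save is illustrative, not part of idlelib).
from codecs import BOM_UTF8

def encode_for_save(chars, fileencoding=None):
    if isinstance(chars, str):        # already plain bytes: pass through
        return chars
    if fileencoding:
        try:
            return chars.encode(fileencoding)
        except UnicodeError:
            pass                      # declared encoding cannot represent chars
    return BOM_UTF8 + chars.encode("utf-8")

print(repr(encode_for_save(u"caf\xe9", "ascii")))  # '\xef\xbb\xbfcaf\xc3\xa9'
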
@@ -474,7 +470,7 @@ title="Print", message="Print to Default Printer", default=tkMessageBox.OK, - master=self.text) + parent=self.text) if not confirm: self.text.focus_set() return "break" @@ -511,10 +507,10 @@ status + output if output: output = "Printing command: %s\n" % repr(command) + output - tkMessageBox.showerror("Print status", output, master=self.text) + tkMessageBox.showerror("Print status", output, parent=self.text) else: #no printing for this platform message = "Printing is not enabled for this platform: %s" % platform - tkMessageBox.showinfo("Print status", message, master=self.text) + tkMessageBox.showinfo("Print status", message, parent=self.text) if tempfilename: os.unlink(tempfilename) return "break" @@ -533,7 +529,7 @@ def askopenfile(self): dir, base = self.defaultfilename("open") if not self.opendialog: - self.opendialog = tkFileDialog.Open(master=self.text, + self.opendialog = tkFileDialog.Open(parent=self.text, filetypes=self.filetypes) filename = self.opendialog.show(initialdir=dir, initialfile=base) if isinstance(filename, unicode): @@ -556,7 +552,7 @@ dir, base = self.defaultfilename("save") if not self.savedialog: self.savedialog = tkFileDialog.SaveAs( - master=self.text, + parent=self.text, filetypes=self.filetypes, defaultextension=self.defaultextension) filename = self.savedialog.show(initialdir=dir, initialfile=base) @@ -568,8 +564,12 @@ "Update recent file list on all editor windows" self.editwin.update_recent_files_list(filename) -def _io_binding(parent): - root = Tk() + +def _io_binding(parent): # htest # + from Tkinter import Toplevel, Text + from idlelib.configHandler import idleConf + + root = Toplevel(parent) root.title("Test IOBinding") width, height, x, y = list(map(int, re.split('[x+]', parent.geometry()))) root.geometry("+%d+%d"%(x, y + 150)) @@ -586,12 +586,13 @@ self.text.event_generate("<>") def save(self, event): self.text.event_generate("<>") + def update_recent_files_list(s, f): pass text = Text(root) text.pack() text.focus_set() editwin = MyEditWin(text) - io = IOBinding(editwin) + IOBinding(editwin) if __name__ == "__main__": from idlelib.idle_test.htest import run diff --git a/lib-python/2.7/idlelib/MultiStatusBar.py b/lib-python/2.7/idlelib/MultiStatusBar.py --- a/lib-python/2.7/idlelib/MultiStatusBar.py +++ b/lib-python/2.7/idlelib/MultiStatusBar.py @@ -8,13 +8,15 @@ Frame.__init__(self, master, **kw) self.labels = {} - def set_label(self, name, text='', side=LEFT): + def set_label(self, name, text='', side=LEFT, width=0): if name not in self.labels: - label = Label(self, bd=1, relief=SUNKEN, anchor=W) - label.pack(side=side) + label = Label(self, borderwidth=0, anchor=W) + label.pack(side=side, pady=0, padx=4) self.labels[name] = label else: label = self.labels[name] + if width != 0: + label.config(width=width) label.config(text=text) def _multistatus_bar(parent): diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt --- a/lib-python/2.7/idlelib/NEWS.txt +++ b/lib-python/2.7/idlelib/NEWS.txt @@ -1,7 +1,139 @@ +What's New in IDLE 2.7.11? +========================== +*Release date: 2015-12-06* + +- Issue 15348: Stop the debugger engine (normally in a user process) + before closing the debugger window (running in the IDLE process). + This prevents the RuntimeErrors that were being caught and ignored. 
+ +- Issue #24455: Prevent IDLE from hanging when a) closing the shell while the + debugger is active (15347); b) closing the debugger with the [X] button + (15348); and c) activating the debugger when already active (24455). + The patch by Mark Roseman does this by making two changes. + 1. Suspend and resume the gui.interaction method with the tcl vwait + mechanism intended for this purpose (instead of root.mainloop & .quit). + 2. In gui.run, allow any existing interaction to terminate first. + +- Change 'The program' to 'Your program' in an IDLE 'kill program?' message + to make it clearer that the program referred to is the currently running + user program, not IDLE itself. + +- Issue #24750: Improve the appearance of the IDLE editor window status bar. + Patch by Mark Roseman. + +- Issue #25313: Change the handling of new built-in text color themes to better + address the compatibility problem introduced by the addition of IDLE Dark. + Consistently use the revised idleConf.CurrentTheme everywhere in idlelib. + +- Issue #24782: Extension configuration is now a tab in the IDLE Preferences + dialog rather than a separate dialog. The former tabs are now a sorted + list. Patch by Mark Roseman. + +- Issue #22726: Re-activate the config dialog help button with some content + about the other buttons and the new IDLE Dark theme. + +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + To use it with IDLEs released before November 2015, hit the + 'Save as New Custom Theme' button and enter a new name, + such as 'Custom Dark'. The custom theme will work with any IDLE + release, and can be modified. + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc chapter. + 'IDLE' now means 'Integrated Development and Learning Environment'. + +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + +- Issue #24972: Inactive selection background now matches active selection + background, as configured by users, on all systems. Found items are now + always highlighted on Windows. Initial patch by Mark Roseman. + +- Issue #24570: Idle: make calltip and completion boxes appear on Macs + affected by a tk regression. Initial patch by Mark Roseman. + +- Issue #24988: Idle ScrolledList context menus (used in debugger) + now work on Mac Aqua. Patch by Mark Roseman. + +- Issue #24801: Make right-click for context menu work on Mac Aqua. + Patch by Mark Roseman. + +- Issue #25173: Associate tkinter messageboxes with a specific widget. + For Mac OSX, make them a 'sheet'. Patch by Mark Roseman. + +- Issue #25198: Enhance the initial html viewer now used for Idle Help. + * Properly indent fixed-pitch text (patch by Mark Roseman). + * Give code snippet a very Sphinx-like light blueish-gray background. + * Re-use initial width and height set by users for shell and editor. + * When the Table of Contents (TOC) menu is used, put the section header + at the top of the screen. + +- Issue #25225: Condense and rewrite Idle doc section on text colors. + +- Issue #21995: Explain some differences between IDLE and console Python. + +- Issue #22820: Explain need for *print* when running file from Idle editor. + +- Issue #25224: Doc: augment Idle feature list and no-subprocess section. 
+ +- Issue #25219: Update doc for Idle command line options. + Some were missing and notes were not correct. + +- Issue #24861: Most of idlelib is private and subject to change. + Use idleib.idle.* to start Idle. See idlelib.__init__.__doc__. + +- Issue #25199: Idle: add synchronization comments for future maintainers. + +- Issue #16893: Replace help.txt with help.html for Idle doc display. + The new idlelib/help.html is rstripped Doc/build/html/library/idle.html. + It looks better than help.txt and will better document Idle as released. + The tkinter html viewer that works for this file was written by Mark Roseman. + The now unused EditorWindow.HelpDialog class and helt.txt file are deprecated. + +- Issue #24199: Deprecate unused idlelib.idlever with possible removal in 3.6. + +- Issue #24790: Remove extraneous code (which also create 2 & 3 conflicts). + +- Issue #23672: Allow Idle to edit and run files with astral chars in name. + Patch by Mohd Sanad Zaki Rizvi. + +- Issue 24745: Idle editor default font. Switch from Courier to + platform-sensitive TkFixedFont. This should not affect current customized + font selections. If there is a problem, edit $HOME/.idlerc/config-main.cfg + and remove 'fontxxx' entries from [Editor Window]. Patch by Mark Roseman. + +- Issue #21192: Idle editor. When a file is run, put its name in the restart bar. + Do not print false prompts. Original patch by Adnan Umer. + +- Issue #13884: Idle menus. Remove tearoff lines. Patch by Roger Serwy. + +- Issue #15809: IDLE shell now uses locale encoding instead of Latin1 for + decoding unicode literals. + + +What's New in IDLE 2.7.10? +========================= +*Release date: 2015-05-23* + +- Issue #23583: Fixed writing unicode to standard output stream in IDLE. + +- Issue #20577: Configuration of the max line length for the FormatParagraph + extension has been moved from the General tab of the Idle preferences dialog + to the FormatParagraph tab of the Config Extensions dialog. + Patch by Tal Einat. + +- Issue #16893: Update Idle doc chapter to match current Idle and add new + information. + +- Issue #23180: Rename IDLE "Windows" menu item to "Window". + Patch by Al Sweigart. + + What's New in IDLE 2.7.9? ========================= - -*Release data: 2014-12-07* (projected) +*Release date: 2014-12-10* - Issue #16893: Update Idle doc chapter to match current Idle and add new information. @@ -35,7 +167,6 @@ What's New in IDLE 2.7.8? ========================= - *Release date: 2014-06-29* - Issue #21940: Add unittest for WidgetRedirector. Initial patch by Saimadhav @@ -63,7 +194,6 @@ What's New in IDLE 2.7.7? ========================= - *Release date: 2014-05-31* - Issue #18104: Add idlelib/idle_test/htest.py with a few sample tests to begin @@ -101,7 +231,6 @@ What's New in IDLE 2.7.6? ========================= - *Release date: 2013-11-10* - Issue #19426: Fixed the opening of Python source file with specified encoding. @@ -149,7 +278,6 @@ What's New in IDLE 2.7.5? ========================= - *Release date: 2013-05-12* - Issue #17838: Allow sys.stdin to be reassigned. @@ -184,7 +312,6 @@ What's New in IDLE 2.7.4? ========================= - *Release date: 2013-04-06* - Issue #17625: In IDLE, close the replace dialog after it is used. @@ -255,7 +382,6 @@ What's New in IDLE 2.7.3? ========================= - *Release date: 2012-04-09* - Issue #964437 Make IDLE help window non-modal. @@ -288,7 +414,6 @@ What's New in IDLE 2.7.2? 
========================= - *Release date: 2011-06-11* - Issue #11718: IDLE's open module dialog couldn't find the __init__.py @@ -333,7 +458,6 @@ What's New in Python 2.7.1? =========================== - *Release date: 2010-11-27* - Issue #6378: idle.bat now runs with the appropriate Python version rather than @@ -342,7 +466,6 @@ What's New in IDLE 2.7? ======================= - *Release date: 2010-07-03* - Issue #5150: IDLE's format menu now has an option to strip trailing @@ -374,7 +497,6 @@ What's New in IDLE 2.6? ======================= - *Release date: 01-Oct-2008* - Issue #2665: On Windows, an IDLE installation upgraded from an old version @@ -388,11 +510,6 @@ - Autocompletion of filenames now support alternate separators, e.g. the '/' char on Windows. Patch 2061 Tal Einat. -What's New in IDLE 2.6a1? -========================= - -*Release date: 29-Feb-2008* - - Configured selection highlighting colors were ignored; updating highlighting in the config dialog would cause non-Python files to be colored as if they were Python source; improve use of ColorDelagator. Patch 1334. Tal Einat. @@ -464,15 +581,8 @@ What's New in IDLE 1.2? ======================= - *Release date: 19-SEP-2006* - -What's New in IDLE 1.2c1? -========================= - -*Release date: 17-AUG-2006* - - File menu hotkeys: there were three 'p' assignments. Reassign the 'Save Copy As' and 'Print' hotkeys to 'y' and 't'. Change the Shell hotkey from 's' to 'l'. @@ -493,11 +603,6 @@ - When used w/o subprocess, all exceptions were preceded by an error message claiming they were IDLE internal errors (since 1.2a1). -What's New in IDLE 1.2b3? -========================= - -*Release date: 03-AUG-2006* - - Bug #1525817: Don't truncate short lines in IDLE's tool tips. - Bug #1517990: IDLE keybindings on MacOS X now work correctly @@ -521,26 +626,6 @@ 'as' keyword in comment directly following import command. Closes 1325071. Patch 1479219 Tal Einat -What's New in IDLE 1.2b2? -========================= - -*Release date: 11-JUL-2006* - -What's New in IDLE 1.2b1? -========================= - -*Release date: 20-JUN-2006* - -What's New in IDLE 1.2a2? -========================= - -*Release date: 27-APR-2006* - -What's New in IDLE 1.2a1? -========================= - -*Release date: 05-APR-2006* - - Patch #1162825: Support non-ASCII characters in IDLE window titles. - Source file f.flush() after writing; trying to avoid lossage if user @@ -620,19 +705,14 @@ - The remote procedure call module rpc.py can now access data attributes of remote registered objects. Changes to these attributes are local, however. + What's New in IDLE 1.1? ======================= - *Release date: 30-NOV-2004* - On OpenBSD, terminating IDLE with ctrl-c from the command line caused a stuck subprocess MainThread because only the SocketThread was exiting. -What's New in IDLE 1.1b3/rc1? -============================= - -*Release date: 18-NOV-2004* - - Saving a Keyset w/o making changes (by using the "Save as New Custom Key Set" button) caused IDLE to fail on restart (no new keyset was created in config-keys.cfg). Also true for Theme/highlights. Python Bug 1064535. @@ -640,28 +720,12 @@ - A change to the linecache.py API caused IDLE to exit when an exception was raised while running without the subprocess (-n switch). Python Bug 1063840. -What's New in IDLE 1.1b2? 
-========================= - -*Release date: 03-NOV-2004* - - When paragraph reformat width was made configurable, a bug was introduced that caused reformatting of comment blocks to ignore how far the block was indented, effectively adding the indentation width to the reformat width. This has been repaired, and the reformat width is again a bound on the total width of reformatted lines. -What's New in IDLE 1.1b1? -========================= - -*Release date: 15-OCT-2004* - - -What's New in IDLE 1.1a3? -========================= - -*Release date: 02-SEP-2004* - - Improve keyboard focus binding, especially in Windows menu. Improve window raising, especially in the Windows menu and in the debugger. IDLEfork 763524. @@ -669,24 +733,12 @@ - If user passes a non-existent filename on the commandline, just open a new file, don't raise a dialog. IDLEfork 854928. - -What's New in IDLE 1.1a2? -========================= - -*Release date: 05-AUG-2004* - - EditorWindow.py was not finding the .chm help file on Windows. Typo at Rev 1.54. Python Bug 990954 - checking sys.platform for substring 'win' was breaking IDLE docs on Mac (darwin). Also, Mac Safari browser requires full file:// URIs. SF 900580. - -What's New in IDLE 1.1a1? -========================= - -*Release date: 08-JUL-2004* - - Redirect the warning stream to the shell during the ScriptBinding check of user code and format the warning similarly to an exception for both that check and for runtime warnings raised in the subprocess. @@ -749,26 +801,10 @@ What's New in IDLE 1.0? ======================= - *Release date: 29-Jul-2003* -- Added a banner to the shell discussing warnings possibly raised by personal - firewall software. Added same comment to README.txt. - - -What's New in IDLE 1.0 release candidate 2? -=========================================== - -*Release date: 24-Jul-2003* - - Calltip error when docstring was None Python Bug 775541 - -What's New in IDLE 1.0 release candidate 1? -=========================================== - -*Release date: 18-Jul-2003* - - Updated extend.txt, help.txt, and config-extensions.def to correctly reflect the current status of the configuration system. Python Bug 768469 @@ -784,12 +820,6 @@ sys.std{in|out|err}.encoding, for both the local and the subprocess case. SF IDLEfork patch 682347. - -What's New in IDLE 1.0b2? -========================= - -*Release date: 29-Jun-2003* - - Extend AboutDialog.ViewFile() to support file encodings. Make the CREDITS file Latin-1. @@ -828,7 +858,6 @@ What's New in IDLEfork 0.9b1? ============================= - *Release date: 02-Jun-2003* - The current working directory of the execution environment (and shell @@ -930,10 +959,8 @@ exception formatting to the subprocess. - What's New in IDLEfork 0.9 Alpha 2? =================================== - *Release date: 27-Jan-2003* - Updated INSTALL.txt to claify use of the python2 rpm. @@ -1037,7 +1064,6 @@ What's New in IDLEfork 0.9 Alpha 1? =================================== - *Release date: 31-Dec-2002* - First release of major new functionality. 
For further details refer to diff --git a/lib-python/2.7/idlelib/OutputWindow.py b/lib-python/2.7/idlelib/OutputWindow.py --- a/lib-python/2.7/idlelib/OutputWindow.py +++ b/lib-python/2.7/idlelib/OutputWindow.py @@ -96,7 +96,7 @@ "No special line", "The line you point at doesn't look like " "a valid file name followed by a line number.", - master=self.text) + parent=self.text) return filename, lineno = result edit = self.flist.open(filename) diff --git a/lib-python/2.7/idlelib/PathBrowser.py b/lib-python/2.7/idlelib/PathBrowser.py --- a/lib-python/2.7/idlelib/PathBrowser.py +++ b/lib-python/2.7/idlelib/PathBrowser.py @@ -17,6 +17,7 @@ self.init(flist) def settitle(self): + "Set window titles." self.top.wm_title("Path Browser") self.top.wm_iconname("Path Browser") @@ -70,7 +71,7 @@ def ispackagedir(self, file): if not os.path.isdir(file): - return 0 + return False init = os.path.join(file, "__init__.py") return os.path.exists(init) @@ -91,7 +92,7 @@ sorted.sort() return sorted -def _path_browser(parent): +def _path_browser(parent): # htest # flist = PyShellFileList(parent) PathBrowser(flist, _htest=True) parent.mainloop() diff --git a/lib-python/2.7/idlelib/PyShell.py b/lib-python/2.7/idlelib/PyShell.py --- a/lib-python/2.7/idlelib/PyShell.py +++ b/lib-python/2.7/idlelib/PyShell.py @@ -10,8 +10,6 @@ import socket import time import threading -import traceback -import types import io import linecache @@ -32,11 +30,11 @@ from idlelib.UndoDelegator import UndoDelegator from idlelib.OutputWindow import OutputWindow from idlelib.configHandler import idleConf -from idlelib import idlever from idlelib import rpc from idlelib import Debugger from idlelib import RemoteDebugger from idlelib import macosxSupport +from idlelib import IOBinding IDENTCHARS = string.ascii_letters + string.digits + "_" HOST = '127.0.0.1' # python execution server on localhost loopback @@ -160,7 +158,7 @@ # possible due to update in restore_file_breaks return if color: - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() cfg = idleConf.GetHighlight(theme, "break") else: cfg = {'foreground': '', 'background': ''} @@ -171,7 +169,7 @@ filename = self.io.filename text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1)) try: - i = self.breakpoints.index(lineno) + self.breakpoints.index(lineno) except ValueError: # only add if missing, i.e. 
do once self.breakpoints.append(lineno) try: # update the subprocess debugger @@ -345,7 +343,7 @@ def LoadTagDefs(self): ColorDelegator.LoadTagDefs(self) - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() self.tagdefs.update({ "stdin": {'background':None,'foreground':None}, "stdout": idleConf.GetHighlight(theme, "stdout"), @@ -439,7 +437,7 @@ try: self.rpcclt = MyRPCClient(addr) break - except socket.error as err: + except socket.error: pass else: self.display_port_binding_error() @@ -460,7 +458,7 @@ self.rpcclt.listening_sock.settimeout(10) try: self.rpcclt.accept() - except socket.timeout as err: + except socket.timeout: self.display_no_subprocess_error() return None self.rpcclt.register("console", self.tkconsole) @@ -474,7 +472,7 @@ self.poll_subprocess() return self.rpcclt - def restart_subprocess(self, with_cwd=False): + def restart_subprocess(self, with_cwd=False, filename=''): if self.restarting: return self.rpcclt self.restarting = True @@ -495,25 +493,24 @@ self.spawn_subprocess() try: self.rpcclt.accept() - except socket.timeout as err: + except socket.timeout: self.display_no_subprocess_error() return None self.transfer_path(with_cwd=with_cwd) console.stop_readline() # annotate restart in shell window and mark it console.text.delete("iomark", "end-1c") - if was_executing: - console.write('\n') - console.showprompt() - halfbar = ((int(console.width) - 16) // 2) * '=' - console.write(halfbar + ' RESTART ' + halfbar) + tag = 'RESTART: ' + (filename if filename else 'Shell') + halfbar = ((int(console.width) -len(tag) - 4) // 2) * '=' + console.write("\n{0} {1} {0}".format(halfbar, tag)) console.text.mark_set("restart", "end-1c") console.text.mark_gravity("restart", "left") - console.showprompt() + if not filename: + console.showprompt() # restart subprocess debugger if debug: # Restarted debugger connects to current instance of debug GUI - gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt) + RemoteDebugger.restart_subprocess_debugger(self.rpcclt) # reload remote debugger breakpoints for all PyShellEditWindows debug.load_breakpoints() self.compile.compiler.flags = self.original_compiler_flags @@ -634,7 +631,7 @@ item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid) from idlelib.TreeWidget import ScrolledCanvas, TreeNode top = Toplevel(self.tkconsole.root) - theme = idleConf.GetOption('main','Theme','name') + theme = idleConf.CurrentTheme() background = idleConf.GetHighlight(theme, 'normal')['background'] sc = ScrolledCanvas(top, bg=background, highlightthickness=0) sc.frame.pack(expand=1, fill="both") @@ -654,7 +651,7 @@ if source is None: source = open(filename, "r").read() try: - code = compile(source, filename, "exec") + code = compile(source, filename, "exec", dont_inherit=True) except (OverflowError, SyntaxError): self.tkconsole.resetoutput() print('*** Error in script or command!\n' @@ -671,10 +668,11 @@ self.more = 0 self.save_warnings_filters = warnings.filters[:] warnings.filterwarnings(action="error", category=SyntaxWarning) - if isinstance(source, types.UnicodeType): - from idlelib import IOBinding + if isinstance(source, unicode) and IOBinding.encoding != 'utf-8': try: - source = source.encode(IOBinding.encoding) + source = '# -*- coding: %s -*-\n%s' % ( + IOBinding.encoding, + source.encode(IOBinding.encoding)) except UnicodeError: self.tkconsole.resetoutput() self.write("Unsupported characters in input\n") @@ -801,7 +799,7 @@ "Exit?", "Do you want to exit altogether?", default="yes", - master=self.tkconsole.text): 
+ parent=self.tkconsole.text): raise else: self.showtraceback() @@ -839,7 +837,7 @@ "Run IDLE with the -n command line switch to start without a " "subprocess and refer to Help/IDLE Help 'Running without a " "subprocess' for further details.", - master=self.tkconsole.text) + parent=self.tkconsole.text) def display_no_subprocess_error(self): tkMessageBox.showerror( @@ -847,14 +845,14 @@ "IDLE's subprocess didn't make connection. Either IDLE can't " "start a subprocess or personal firewall software is blocking " "the connection.", - master=self.tkconsole.text) + parent=self.tkconsole.text) def display_executing_dialog(self): tkMessageBox.showerror( "Already executing", "The Python Shell window is already executing a command; " "please wait until it is finished.", - master=self.tkconsole.text) + parent=self.tkconsole.text) From pypy.commits at gmail.com Tue Dec 22 03:01:24 2015 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 22 Dec 2015 00:01:24 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.11: bump to 2.7.11 Message-ID: <56790354.4d5d1c0a.f5527.3c19@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r81419:5a563e5a5512 Date: 2015-12-21 23:49 -0800 http://bitbucket.org/pypy/pypy/changeset/5a563e5a5512/ Log: bump to 2.7.11 diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -21,12 +21,12 @@ /* Version parsed out into numeric values */ #define PY_MAJOR_VERSION 2 #define PY_MINOR_VERSION 7 -#define PY_MICRO_VERSION 10 +#define PY_MICRO_VERSION 11 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "2.7.10" +#define PY_VERSION "2.7.11" /* PyPy version as a string */ #define PYPY_VERSION "4.1.0-alpha0" diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -6,7 +6,7 @@ from pypy.interpreter import gateway #XXX # the release serial 42 is not in range(16) -CPYTHON_VERSION = (2, 7, 10, "final", 42) +CPYTHON_VERSION = (2, 7, 11, "final", 42) #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h From pypy.commits at gmail.com Tue Dec 22 08:20:32 2015 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 22 Dec 2015 05:20:32 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: return types test for call release gil test passes, needed to save the floating point return value on the stack Message-ID: <56794e20.0357c20a.f7afb.ffffb453@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81420:2a2bdeba9644 Date: 2015-12-22 14:19 +0100 http://bitbucket.org/pypy/pypy/changeset/2a2bdeba9644/ Log: return types test for call release gil test passes, needed to save the floating point return value on the stack diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -154,6 +154,7 @@ RFASTGILPTR = self.RFASTGILPTR # self.mc.STMG(RSHADOWOLD, self.RFASTGILPTR, l.addr(-3*WORD, r.SP)) + # 3 for the three registers, 1 for a floating point return value! 
self.subtracted_to_sp += 4*WORD # # Save this thread's shadowstack pointer into r29, for later comparison @@ -226,18 +227,14 @@ if reg.is_core_reg(): self.mc.LGR(RSAVEDRES, reg) elif reg.is_fp_reg(): - xxx - self.mc.stfd(reg.value, r.SP.value, - PARAM_SAVE_AREA_OFFSET + 7 * WORD) + self.mc.STD(reg, l.addr(-4*WORD, r.SP)) self.mc.load_imm(self.mc.RAW_CALL_REG, self.asm.reacqgil_addr) self.mc.raw_call() if reg is not None: if reg.is_core_reg(): self.mc.LGR(reg, RSAVEDRES) elif reg.is_fp_reg(): - xxx - self.mc.lfd(reg.value, r.SP.value, - PARAM_SAVE_AREA_OFFSET + 7 * WORD) + self.mc.LD(reg, l.addr(-4*WORD, r.SP)) # replace b1_location with BEQ(here) pmc = OverwritingBuilder(self.mc, b1_location, 1) From pypy.commits at gmail.com Tue Dec 22 11:35:57 2015 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 22 Dec 2015 08:35:57 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: adpating errno saving and restoring for call release gil, first part of the test passes Message-ID: <56797bed.26acc20a.37a98.05fb@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81421:ceb6d557fc88 Date: 2015-12-22 17:34 +0100 http://bitbucket.org/pypy/pypy/changeset/ceb6d557fc88/ Log: adpating errno saving and restoring for call release gil, first part of the test passes diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2997,19 +2997,23 @@ if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("not on LLGraph") eci = ExternalCompilationInfo( - separate_module_sources=[''' + separate_module_sources=[""" #include + #include static long f1(long a, long b, long c, long d, long e, long f, long g) { errno = 42; - return (a + 10*b + 100*c + 1000*d + + printf("value: a %d, b %d, c %d, d %d, e %d, f %d, g %d \\n", a,b,c,d,e,f,g); + long v = (a + 10*b + 100*c + 1000*d + 10000*e + 100000*f + 1000000*g); + printf("value: %d\\n", v); + return v; } RPY_EXPORTED long test_call_release_gil_save_errno(void) { return (long)&f1; } - ''']) + """]) fn_name = 'test_call_release_gil_save_errno' getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, compilation_info=eci, _nowrapper=True) @@ -3019,8 +3023,8 @@ # for saveerr in [rffi.RFFI_ERR_NONE, rffi.RFFI_SAVE_ERRNO, - rffi.RFFI_ERR_NONE | rffi.RFFI_ALT_ERRNO, - rffi.RFFI_SAVE_ERRNO | rffi.RFFI_ALT_ERRNO, + #rffi.RFFI_ERR_NONE | rffi.RFFI_ALT_ERRNO, + #rffi.RFFI_SAVE_ERRNO | rffi.RFFI_ALT_ERRNO, ]: faildescr = BasicFailDescr(1) inputargs = [InputArgInt() for i in range(7)] diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -76,6 +76,9 @@ self.subtracted_to_sp += len(stack_params) * 8 base = -len(stack_params) * 8 + if self.is_call_release_gil: + self.subtracted_to_sp += 8*WORD + base -= 8*WORD for idx,i in enumerate(stack_params): loc = arglocs[i] offset = base + 8 * idx @@ -153,9 +156,9 @@ RSHADOWPTR = self.RSHADOWPTR RFASTGILPTR = self.RFASTGILPTR # - self.mc.STMG(RSHADOWOLD, self.RFASTGILPTR, l.addr(-3*WORD, r.SP)) - # 3 for the three registers, 1 for a floating point return value! - self.subtracted_to_sp += 4*WORD + self.mc.STMG(r.r8, r.r13, l.addr(-7*WORD, r.SP)) + # 6 registers, 1 for a floating point return value! + # registered by prepare_arguments! 
# # Save this thread's shadowstack pointer into r29, for later comparison gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap @@ -225,16 +228,16 @@ PARAM_SAVE_AREA_OFFSET = 0 if reg is not None: if reg.is_core_reg(): - self.mc.LGR(RSAVEDRES, reg) + self.mc.STG(reg, l.addr(-7*WORD, r.SP)) elif reg.is_fp_reg(): - self.mc.STD(reg, l.addr(-4*WORD, r.SP)) + self.mc.STD(reg, l.addr(-7*WORD, r.SP)) self.mc.load_imm(self.mc.RAW_CALL_REG, self.asm.reacqgil_addr) self.mc.raw_call() if reg is not None: if reg.is_core_reg(): - self.mc.LGR(reg, RSAVEDRES) + self.mc.LG(reg, l.addr(-7*WORD, r.SP)) elif reg.is_fp_reg(): - self.mc.LD(reg, l.addr(-4*WORD, r.SP)) + self.mc.LD(reg, l.addr(-7*WORD, r.SP)) # replace b1_location with BEQ(here) pmc = OverwritingBuilder(self.mc, b1_location, 1) @@ -242,14 +245,13 @@ pmc.overwrite() # restore the values that might have been overwritten - self.mc.LMG(RSHADOWOLD, RFASTGILPTR, l.addr(-3*WORD, r.SP)) + self.mc.LMG(r.r8, r.r13, l.addr(-7*WORD, r.SP)) def write_real_errno(self, save_err): if save_err & rffi.RFFI_READSAVED_ERRNO: # Just before a call, read '*_errno' and write it into the - # real 'errno'. A lot of registers are free here, notably - # r11 and r0. + # real 'errno'. if save_err & rffi.RFFI_ALT_ERRNO: rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu) else: @@ -277,7 +279,7 @@ else: rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) p_errno = llerrno.get_p_errno_offset(self.asm.cpu) - self.mc.ld(r.r9.value, r.SP.value, THREADLOCAL_ADDR_OFFSET) - self.mc.ld(r.r10.value, r.r9.value, p_errno) - self.mc.lwz(r.r10.value, r.r10.value, 0) - self.mc.stw(r.r10.value, r.r9.value, rpy_errno) + self.mc.LG(r.r12, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) + self.mc.LG(r.r11, l.addr(p_errno, r.r12)) + self.mc.LGH(r.r11, l.addr(0, r.r11)) + self.mc.STG(r.r11, l.addr(p_errno, r.r12)) From pypy.commits at gmail.com Tue Dec 22 12:43:36 2015 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 22 Dec 2015 09:43:36 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: reverted changes to runner_test (for debug purpose), now the first errno test is fully passing Message-ID: <56798bc8.552f1c0a.e4b4d.2c49@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81422:5c47bf206610 Date: 2015-12-22 18:42 +0100 http://bitbucket.org/pypy/pypy/changeset/5c47bf206610/ Log: reverted changes to runner_test (for debug purpose), now the first errno test is fully passing diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2997,23 +2997,19 @@ if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("not on LLGraph") eci = ExternalCompilationInfo( - separate_module_sources=[""" + separate_module_sources=[''' #include - #include static long f1(long a, long b, long c, long d, long e, long f, long g) { errno = 42; - printf("value: a %d, b %d, c %d, d %d, e %d, f %d, g %d \\n", a,b,c,d,e,f,g); - long v = (a + 10*b + 100*c + 1000*d + + return (a + 10*b + 100*c + 1000*d + 10000*e + 100000*f + 1000000*g); - printf("value: %d\\n", v); - return v; } RPY_EXPORTED long test_call_release_gil_save_errno(void) { return (long)&f1; } - """]) + ''']) fn_name = 'test_call_release_gil_save_errno' getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, compilation_info=eci, _nowrapper=True) @@ -3023,8 +3019,8 @@ # for saveerr in [rffi.RFFI_ERR_NONE, rffi.RFFI_SAVE_ERRNO, - #rffi.RFFI_ERR_NONE | rffi.RFFI_ALT_ERRNO, - #rffi.RFFI_SAVE_ERRNO | 
rffi.RFFI_ALT_ERRNO, + rffi.RFFI_ERR_NONE | rffi.RFFI_ALT_ERRNO, + rffi.RFFI_SAVE_ERRNO | rffi.RFFI_ALT_ERRNO, ]: faildescr = BasicFailDescr(1) inputargs = [InputArgInt() for i in range(7)] diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -184,19 +184,19 @@ RFASTGILPTR = self.RFASTGILPTR # r10: &fastgil RSHADOWOLD = self.RSHADOWOLD # r12: previous val of root_stack_top - # Equivalent of 'r14 = __sync_lock_test_and_set(&rpy_fastgil, 1);' + # Equivalent of 'r12 = __sync_lock_test_and_set(&rpy_fastgil, 1);' self.mc.LGHI(r.SCRATCH, l.imm(1)) retry_label = self.mc.currpos() # compare and swap, only succeeds if the the contents of the - # lock is equal to r14 (= 0) - self.mc.LG(r.r14, l.addr(0, RFASTGILPTR)) - self.mc.CSG(r.r14, r.SCRATCH, l.addr(0, RFASTGILPTR)) # try to claim lock + # lock is equal to r12 (= 0) + self.mc.LG(r.r12, l.addr(0, RFASTGILPTR)) + self.mc.CSG(r.r12, r.SCRATCH, l.addr(0, RFASTGILPTR)) # try to claim lock self.mc.BRC(c.NE, l.imm(retry_label - self.mc.currpos())) # retry if failed self.mc.sync() - self.mc.CGHI(r.r14, l.imm0) + self.mc.CGHI(r.r12, l.imm0) b1_location = self.mc.currpos() - self.mc.trap() # boehm: patched with a BEQ: jump if r14 is zero + self.mc.trap() # boehm: patched with a BEQ: jump if r12 is zero self.mc.write('\x00'*4) # shadowstack: patched with BNE instead if self.asm.cpu.gc_ll_descr.gcrootmap: @@ -214,8 +214,8 @@ # revert the rpy_fastgil acquired above, so that the # general 'reacqgil_addr' below can acquire it again... - # (here, r14 is conveniently zero) - self.mc.STG(r.r14, l.addr(0,RFASTGILPTR)) + # (here, r12 is conveniently zero) + self.mc.STG(r.r12, l.addr(0,RFASTGILPTR)) pmc = OverwritingBuilder(self.mc, bne_location, 1) pmc.BCRL(c.NE, self.mc.currpos() - bne_location) @@ -258,16 +258,16 @@ rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) p_errno = llerrno.get_p_errno_offset(self.asm.cpu) self.mc.LG(r.r11, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) - self.mc.LGH(r.SCRATCH2, l.addr(rpy_errno, r.r11)) + self.mc.LGF(r.SCRATCH2, l.addr(rpy_errno, r.r11)) self.mc.LG(r.r11, l.addr(p_errno, r.r11)) - self.mc.STHY(r.SCRATCH2, l.addr(0,r.r11)) + self.mc.STY(r.SCRATCH2, l.addr(0,r.r11)) elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: # Same, but write zero. 
p_errno = llerrno.get_p_errno_offset(self.asm.cpu) self.mc.LG(r.r11, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) self.mc.LG(r.r11, l.addr(p_errno, r.r11)) self.mc.LGHI(r.SCRATCH, 0) - self.mc.STHY(r.SCRATCH, l.addr(0,r.r11)) + self.mc.STY(r.SCRATCH, l.addr(0,r.r11)) def read_real_errno(self, save_err): if save_err & rffi.RFFI_SAVE_ERRNO: @@ -281,5 +281,5 @@ p_errno = llerrno.get_p_errno_offset(self.asm.cpu) self.mc.LG(r.r12, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) self.mc.LG(r.r11, l.addr(p_errno, r.r12)) - self.mc.LGH(r.r11, l.addr(0, r.r11)) - self.mc.STG(r.r11, l.addr(p_errno, r.r12)) + self.mc.LGF(r.r11, l.addr(0, r.r11)) + self.mc.STY(r.r11, l.addr(rpy_errno, r.r12)) From pypy.commits at gmail.com Tue Dec 22 12:46:16 2015 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 22 Dec 2015 09:46:16 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: catchup with default changes Message-ID: <56798c68.c3151c0a.bf121.1916@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81423:b60fa730252e Date: 2015-12-22 18:45 +0100 http://bitbucket.org/pypy/pypy/changeset/b60fa730252e/ Log: catchup with default changes diff too long, truncating to 2000 out of 3312 lines diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.0 +Version: 1.4.1 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.0" -__version_info__ = (1, 4, 0) +__version__ = "1.4.1" +__version_info__ = (1, 4, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -130,8 +130,13 @@ More complete example --------------------- -.. note:: This example depends on pypy_execute_source_ptr which is not available - in PyPy <= 2.2.1. +.. note:: Note that we do not make use of ``extern "Python"``, the new + way to do callbacks in CFFI 1.4: this is because these examples use + the ABI mode, not the API mode, and with the ABI mode you still have + to use ``ffi.callback()``. It is work in progress to integrate + ``extern "Python"`` with the idea of embedding (and it is expected + to ultimately lead to a better way to do embedding than the one + described here, and that would work equally well on CPython and PyPy). Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -73,3 +73,20 @@ Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and turn them into regular RPython functions. Most RPython-compatible `os.*` functions are now directly accessible as `rpython.rposix.*`. + +.. branch: always-enable-gil + +Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. + +.. 
branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + +.. branch: test-AF_NETLINK +.. branch: small-cleanups-misc +.. branch: cpyext-slotdefs +.. branch: fix-missing-canraise + +.. branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing in rpython diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload, clibffi -VERSION = "1.4.0" +VERSION = "1.4.2" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py --- a/pypy/module/_cffi_backend/call_python.py +++ b/pypy/module/_cffi_backend/call_python.py @@ -40,10 +40,9 @@ at least 8 bytes in size. """ from pypy.module._cffi_backend.ccallback import reveal_callback + from rpython.rlib import rgil - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -71,9 +70,7 @@ cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) rffi.stackcounter.stacks_counter -= 1 - before = rffi.aroundstate.before - if before: - before() + rgil.release() def get_ll_cffi_call_python(): diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/_file/test/test_large_file.py b/pypy/module/_file/test/test_large_file.py --- a/pypy/module/_file/test/test_large_file.py +++ b/pypy/module/_file/test/test_large_file.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.module._file.test.test_file import getfile @@ -13,6 +13,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): from rpython.translator.c.test.test_extfunc import need_sparse_files + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_large_seek_offsets(self): diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -251,7 +251,7 @@ from pypy.module._socket.interp_socket import addr_as_object if not hasattr(rsocket._c, 'sockaddr_ll'): py.test.skip("posix specific test") - # HACK: To get the correct interface numer of lo, which in most cases is 1, + # HACK: To get the correct interface number of lo, which in most cases is 1, # but can be anything (i.e. 
39), we need to call the libc function # if_nametoindex to get the correct index import ctypes @@ -513,7 +513,7 @@ def test_getsetsockopt(self): import _socket as socket import struct - # A socket sould start with reuse == 0 + # A socket should start with reuse == 0 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse == 0 @@ -627,6 +627,26 @@ self.foo = _socket.socket() +class AppTestNetlink: + def setup_class(cls): + if not hasattr(os, 'getpid'): + py.test.skip("AF_NETLINK needs os.getpid()") + w_ok = space.appexec([], "(): import _socket; " + + "return hasattr(_socket, 'AF_NETLINK')") + if not space.is_true(w_ok): + py.test.skip("no AF_NETLINK on this platform") + cls.space = space + + def test_connect_to_kernel_netlink_routing_socket(self): + import _socket, os + s = _socket.socket(_socket.AF_NETLINK, _socket.SOCK_DGRAM, _socket.NETLINK_ROUTE) + assert s.getsockname() == (0L, 0L) + s.bind((0, 0)) + a, b = s.getsockname() + assert a == os.getpid() + assert b == 0 + + class AppTestPacket: def setup_class(cls): if not hasattr(os, 'getuid') or os.getuid() != 0: diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -124,7 +124,7 @@ METH_COEXIST METH_STATIC METH_CLASS METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) @@ -602,6 +602,7 @@ # Make the wrapper for the cases (1) and (2) def make_wrapper(space, callable, gil=None): "NOT_RPYTHON" + from rpython.rlib import rgil names = callable.api_func.argnames argtypes_enum_ui = unrolling_iterable(enumerate(zip(callable.api_func.argtypes, [name.startswith("w_") for name in names]))) @@ -617,9 +618,7 @@ # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer if gil_acquire: - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -692,9 +691,7 @@ pypy_debug_catch_fatal_exception() rffi.stackcounter.stacks_counter -= 1 if gil_release: - before = rffi.aroundstate.before - if before: - before() + rgil.release() return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,14 +4,14 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, readbufferproc) -from pypy.module.cpyext.pyobject import from_ref +from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from 
pypy.interpreter.error import OperationError, oefmt @@ -65,22 +65,24 @@ func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, w_self, args_w[0]) def wrap_binaryfunc_r(space, w_self, w_args, func): func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, args_w[0], w_self) def wrap_inquirypred(space, w_self, w_args, func): diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -591,45 +591,92 @@ def test_binaryfunc(self): module = self.import_extension('foo', [ - ("new_obj", "METH_NOARGS", + ("newInt", "METH_VARARGS", """ - FooObject *fooObj; + IntLikeObject *intObj; + long intval; - Foo_Type.tp_as_number = &foo_as_number; - foo_as_number.nb_add = foo_nb_add_call; - if (PyType_Ready(&Foo_Type) < 0) return NULL; - fooObj = PyObject_New(FooObject, &Foo_Type); - if (!fooObj) { + if (!PyArg_ParseTuple(args, "i", &intval)) + return NULL; + + IntLike_Type.tp_as_number = &intlike_as_number; + IntLike_Type.tp_flags |= Py_TPFLAGS_CHECKTYPES; + intlike_as_number.nb_add = intlike_nb_add; + if (PyType_Ready(&IntLike_Type) < 0) return NULL; + intObj = PyObject_New(IntLikeObject, &IntLike_Type); + if (!intObj) { return NULL; } - return (PyObject *)fooObj; + intObj->ival = intval; + return (PyObject *)intObj; + """), + ("newIntNoOp", "METH_VARARGS", + """ + IntLikeObjectNoOp *intObjNoOp; + long intval; + + if (!PyArg_ParseTuple(args, "i", &intval)) + return NULL; + + IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; + if (PyType_Ready(&IntLike_Type_NoOp) < 0) return NULL; + intObjNoOp = PyObject_New(IntLikeObjectNoOp, &IntLike_Type_NoOp); + if (!intObjNoOp) { + return NULL; + } + + intObjNoOp->ival = intval; + return (PyObject *)intObjNoOp; """)], """ typedef struct { PyObject_HEAD - } FooObject; + long ival; + } IntLikeObject; static PyObject * - foo_nb_add_call(PyObject *self, PyObject *other) + intlike_nb_add(PyObject *self, PyObject *other) { - return PyInt_FromLong(42); + long val1 = ((IntLikeObject *)(self))->ival; + if (PyInt_Check(other)) { + long val2 = PyInt_AsLong(other); + return PyInt_FromLong(val1+val2); + } + + long val2 = ((IntLikeObject *)(other))->ival; + return PyInt_FromLong(val1+val2); } - PyTypeObject Foo_Type = { + PyTypeObject IntLike_Type = { PyObject_HEAD_INIT(0) /*ob_size*/ 0, - /*tp_name*/ "Foo", - /*tp_basicsize*/ sizeof(FooObject), + /*tp_name*/ "IntLike", + /*tp_basicsize*/ sizeof(IntLikeObject), }; - static PyNumberMethods foo_as_number; + static PyNumberMethods intlike_as_number; + + typedef struct + { + PyObject_HEAD + long ival; + } IntLikeObjectNoOp; + + PyTypeObject IntLike_Type_NoOp = { + 
PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "IntLikeNoOp", + /*tp_basicsize*/ sizeof(IntLikeObjectNoOp), + }; """) - a = module.new_obj() - b = module.new_obj() + a = module.newInt(1) + b = module.newInt(2) c = 3 - assert (a + b) == 42 - raises(TypeError, "b + c") + d = module.newIntNoOp(4) + assert (a + b) == 3 + assert (b + c) == 5 + assert (d + a) == 5 def test_tp_new_in_subclass_of_type(self): skip("BROKEN") diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -1,6 +1,6 @@ from __future__ import with_statement from rpython.tool.udir import udir -import os +import os, sys, py class AppTestMMap: spaceconfig = dict(usemodules=('mmap',)) @@ -8,6 +8,15 @@ def setup_class(cls): cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-'))) + def setup_method(self, meth): + if getattr(meth, 'is_large', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") + def test_page_size(self): import mmap assert mmap.PAGESIZE > 0 @@ -648,6 +657,7 @@ assert m[0xFFFFFFF] == b'A' finally: m.close() + test_large_offset.is_large = True def test_large_filesize(self): import mmap @@ -665,6 +675,7 @@ assert m.size() == 0x180000000 finally: m.close() + test_large_filesize.is_large = True def test_all(self): # this is a global test, ported from test_mmap.py diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -93,6 +93,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_posix_is_pypy_s(self): diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py --- a/pypy/module/signal/__init__.py +++ b/pypy/module/signal/__init__.py @@ -48,3 +48,6 @@ use_bytecode_counter=False) space.actionflag.__class__ = interp_signal.SignalActionFlag # xxx yes I know the previous line is a hack + + def startup(self, space): + space.check_signal_action.startup(space) diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -63,19 +63,25 @@ AsyncAction.__init__(self, space) self.pending_signal = -1 self.fire_in_another_thread = False - if self.space.config.objspace.usemodules.thread: - from pypy.module.thread import gil - gil.after_thread_switch = self._after_thread_switch + # + @rgc.no_collect + def _after_thread_switch(): + if self.fire_in_another_thread: + if self.space.threadlocals.signals_enabled(): + self.fire_in_another_thread = False + self.space.actionflag.rearm_ticker() + # this occurs when we just switched to the main thread + # and there is a signal pending: we force the ticker to + # -1, which should ensure perform() is called quickly. 
+ self._after_thread_switch = _after_thread_switch + # ^^^ so that 'self._after_thread_switch' can be annotated as a + # constant - @rgc.no_collect - def _after_thread_switch(self): - if self.fire_in_another_thread: - if self.space.threadlocals.signals_enabled(): - self.fire_in_another_thread = False - self.space.actionflag.rearm_ticker() - # this occurs when we just switched to the main thread - # and there is a signal pending: we force the ticker to - # -1, which should ensure perform() is called quickly. + def startup(self, space): + # this is translated + if space.config.objspace.usemodules.thread: + from rpython.rlib import rgil + rgil.invoke_after_thread_switch(self._after_thread_switch) def perform(self, executioncontext, frame): self._poll_for_signals() diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py --- a/pypy/module/thread/gil.py +++ b/pypy/module/thread/gil.py @@ -11,7 +11,6 @@ from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import PeriodicAsyncAction from pypy.module.thread.threadlocals import OSThreadLocals -from rpython.rlib.objectmodel import invoke_around_extcall class GILThreadLocals(OSThreadLocals): """A version of OSThreadLocals that enforces a GIL.""" @@ -23,34 +22,21 @@ space.actionflag.register_periodic_action(GILReleaseAction(space), use_bytecode_counter=True) - def _initialize_gil(self, space): - rgil.gil_allocate() - def setup_threads(self, space): """Enable threads in the object space, if they haven't already been.""" if not self.gil_ready: - self._initialize_gil(space) + # Note: this is a quasi-immutable read by module/pypyjit/interp_jit + # It must be changed (to True) only if it was really False before + rgil.allocate() self.gil_ready = True result = True else: result = False # already set up - - # add the GIL-releasing callback around external function calls. - # - # XXX we assume a single space, but this is not quite true during - # testing; for example, if you run the whole of test_lock you get - # a deadlock caused by the first test's space being reused by - # test_lock_again after the global state was cleared by - # test_compile_lock. As a workaround, we repatch these global - # fields systematically. - invoke_around_extcall(before_external_call, after_external_call) return result - def reinit_threads(self, space): - "Called in the child process after a fork()" - OSThreadLocals.reinit_threads(self, space) - if self.gil_ready: # re-initialize the gil if needed - self._initialize_gil(space) + ## def reinit_threads(self, space): + ## "Called in the child process after a fork()" + ## OSThreadLocals.reinit_threads(self, space) class GILReleaseAction(PeriodicAsyncAction): @@ -59,43 +45,4 @@ """ def perform(self, executioncontext, frame): - do_yield_thread() - - -after_thread_switch = lambda: None # hook for signal.py - -def before_external_call(): - # this function must not raise, in such a way that the exception - # transformer knows that it cannot raise! - rgil.gil_release() -before_external_call._gctransformer_hint_cannot_collect_ = True -before_external_call._dont_reach_me_in_del_ = True - -def after_external_call(): - rgil.gil_acquire() - rthread.gc_thread_run() - after_thread_switch() -after_external_call._gctransformer_hint_cannot_collect_ = True -after_external_call._dont_reach_me_in_del_ = True - -# The _gctransformer_hint_cannot_collect_ hack is needed for -# translations in which the *_external_call() functions are not inlined. 
-# They tell the gctransformer not to save and restore the local GC -# pointers in the shadow stack. This is necessary because the GIL is -# not held after the call to before_external_call() or before the call -# to after_external_call(). - -def do_yield_thread(): - # explicitly release the gil, in a way that tries to give more - # priority to other threads (as opposed to continuing to run in - # the same thread). - if rgil.gil_yield_thread(): - rthread.gc_thread_run() - after_thread_switch() -do_yield_thread._gctransformer_hint_close_stack_ = True -do_yield_thread._dont_reach_me_in_del_ = True -do_yield_thread._dont_inline_ = True - -# do_yield_thread() needs a different hint: _gctransformer_hint_close_stack_. -# The *_external_call() functions are themselves called only from the rffi -# module from a helper function that also has this hint. + rgil.yield_thread() diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -5,7 +5,7 @@ import errno from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.module.thread import gil +from rpython.rlib import rgil NORMAL_TIMEOUT = 300.0 # 5 minutes @@ -15,9 +15,9 @@ adaptivedelay = 0.04 limit = time.time() + delay * NORMAL_TIMEOUT while time.time() <= limit: - gil.before_external_call() + rgil.release() time.sleep(adaptivedelay) - gil.after_external_call() + rgil.acquire() gc.collect() if space.is_true(space.call_function(w_condition)): return diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -1,5 +1,6 @@ import time from pypy.module.thread import gil +from rpython.rlib import rgil from rpython.rlib.test import test_rthread from rpython.rlib import rthread as thread from rpython.rlib.objectmodel import we_are_translated @@ -55,7 +56,7 @@ assert state.datalen3 == len(state.data) assert state.datalen4 == len(state.data) debug_print(main, i, state.datalen4) - gil.do_yield_thread() + rgil.yield_thread() assert i == j j += 1 def bootstrap(): @@ -82,9 +83,9 @@ if not still_waiting: raise ValueError("time out") still_waiting -= 1 - if not we_are_translated(): gil.before_external_call() + if not we_are_translated(): rgil.release() time.sleep(0.01) - if not we_are_translated(): gil.after_external_call() + if not we_are_translated(): rgil.acquire() debug_print("leaving!") i1 = i2 = 0 for tid, i in state.data: diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -521,7 +521,6 @@ def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): - # XXX consider to extend rlist's functionality? 
length = self.length() start, stop, step, slicelength = w_index.indices4(space, length) assert slicelength >= 0 diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3516,6 +3516,32 @@ s = a.build_types(f, [unicode]) assert isinstance(s, annmodel.SomeUnicodeString) + def test_extended_slice(self): + a = self.RPythonAnnotator() + def f(start, end, step): + return [1, 2, 3][start:end:step] + with py.test.raises(AnnotatorError): + a.build_types(f, [int, int, int]) + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True)]) + def f(x): + return x[::-1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[::2] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[1:2:1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) def test_negative_slice(self): def f(s, e): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -441,7 +441,7 @@ def dict_contains(s_dct, s_element, position): s_dct.dictdef.generalize_key(s_element) if s_dct._is_empty(position): - s_bool =SomeBool() + s_bool = SomeBool() s_bool.const = False return s_bool return s_Bool diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -1,5 +1,5 @@ """ -This module defines all the SpaceOeprations used in rpython.flowspace. +This module defines all the SpaceOperations used in rpython.flowspace. 
""" import __builtin__ @@ -196,21 +196,6 @@ return cls._dispatch(type(s_arg)) @classmethod - def get_specialization(cls, s_arg, *_ignored): - try: - impl = getattr(s_arg, cls.opname) - - def specialized(annotator, arg, *other_args): - return impl(*[annotator.annotation(x) for x in other_args]) - try: - specialized.can_only_throw = impl.can_only_throw - except AttributeError: - pass - return specialized - except AttributeError: - return cls._dispatch(type(s_arg)) - - @classmethod def register_transform(cls, Some_cls): def decorator(func): cls._transform[Some_cls] = func @@ -523,6 +508,14 @@ *[annotator.annotation(arg) for arg in self.args]) +class NewSlice(HLOperation): + opname = 'newslice' + canraise = [] + + def consider(self, annotator): + raise AnnotatorError("Cannot use extended slicing in rpython") + + class Pow(PureOperation): opname = 'pow' arity = 3 diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -19,7 +19,6 @@ from rpython.jit.backend.arm.locations import imm, RawSPStackLocation from rpython.jit.backend.llsupport import symbolic from rpython.jit.backend.llsupport.gcmap import allocate_gcmap -from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.history import (AbstractFailDescr, ConstInt, @@ -655,31 +654,24 @@ pmc.B_offs(offset, c.EQ) return fcond - def emit_op_setfield_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs, size = arglocs - scale = get_scale(size.value) - self._write_to_mem(value_loc, base_loc, - ofs, imm(scale), fcond) + def emit_op_gc_store(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, ofs_loc, size_loc = arglocs + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, ofs_loc, imm(scale), fcond) return fcond - emit_op_setfield_raw = emit_op_setfield_gc - emit_op_zero_ptr_field = emit_op_setfield_gc - - def _genop_getfield(self, op, arglocs, regalloc, fcond): - base_loc, ofs, res, size = arglocs - signed = op.getdescr().is_field_signed() - scale = get_scale(size.value) - self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond) + def _emit_op_gc_load(self, op, arglocs, regalloc, fcond): + base_loc, ofs_loc, res_loc, nsize_loc = arglocs + nsize = nsize_loc.value + signed = (nsize < 0) + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, ofs_loc, imm(scale), + signed, fcond) return fcond - emit_op_getfield_gc_i = _genop_getfield - emit_op_getfield_gc_r = _genop_getfield - emit_op_getfield_gc_f = _genop_getfield - emit_op_getfield_gc_pure_i = _genop_getfield - emit_op_getfield_gc_pure_r = _genop_getfield - emit_op_getfield_gc_pure_f = _genop_getfield - emit_op_getfield_raw_i = _genop_getfield - emit_op_getfield_raw_f = _genop_getfield + emit_op_gc_load_i = _emit_op_gc_load + emit_op_gc_load_r = _emit_op_gc_load + emit_op_gc_load_f = _emit_op_gc_load def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): base_loc, value_loc = arglocs @@ -688,68 +680,21 @@ self.mc.STR_ri(value_loc.value, base_loc.value, 0, cond=fcond) return fcond - def _genop_getinteriorfield(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, res_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, 
ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - descr = op.getdescr() - assert isinstance(descr, InteriorFieldDescr) - signed = descr.fielddescr.is_field_signed() - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - ofs_loc = tmploc - self._load_from_mem(res_loc, base_loc, ofs_loc, - imm(scale), signed, fcond) - return fcond - - emit_op_getinteriorfield_gc_i = _genop_getinteriorfield - emit_op_getinteriorfield_gc_r = _genop_getinteriorfield - emit_op_getinteriorfield_gc_f = _genop_getinteriorfield - - def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, value_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, index_loc, value_loc, ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - self._write_to_mem(value_loc, base_loc, tmploc, imm(scale), fcond) - return fcond - emit_op_setinteriorfield_raw = emit_op_setinteriorfield_gc - - def emit_op_arraylen_gc(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs = arglocs - self.load_reg(self.mc, res, base_loc, ofs.value) - return fcond - - def emit_op_setarrayitem_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - + def emit_op_gc_store_indexed(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, index_loc, imm(scale), fcond) return fcond def _write_to_mem(self, value_loc, base_loc, ofs_loc, scale, fcond=c.AL): + # Write a value of size '1 << scale' at the address + # 'base_ofs + ofs_loc'. Note that 'scale' is not used to scale + # the offset! 
if scale.value == 3: assert value_loc.is_vfp_reg() # vstr only supports imm offsets @@ -789,43 +734,31 @@ else: assert 0 - emit_op_setarrayitem_raw = emit_op_setarrayitem_gc - - def emit_op_raw_store(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + def _emit_op_gc_load_indexed(self, op, arglocs, regalloc, fcond): + res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() + nsize = nsize_loc.value + signed = (nsize < 0) + # add the base offset + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + # + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, index_loc, imm(scale), + signed, fcond) return fcond - def _genop_getarrayitem(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - signed = op.getdescr().is_item_signed() - - # scale the offset as required - # XXX we should try to encode the scale inside the "shift" part of LDR - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - # - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_getarrayitem_gc_i = _genop_getarrayitem - emit_op_getarrayitem_gc_r = _genop_getarrayitem - emit_op_getarrayitem_gc_f = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_i = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_r = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_f = _genop_getarrayitem - emit_op_getarrayitem_raw_i = _genop_getarrayitem - emit_op_getarrayitem_raw_f = _genop_getarrayitem + emit_op_gc_load_indexed_i = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_r = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_f = _emit_op_gc_load_indexed def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale, signed=False, fcond=c.AL): + # Load a value of '1 << scale' bytes, from the memory location + # 'base_loc + ofs_loc'. Note that 'scale' is not used to scale + # the offset! 
+ # if scale.value == 3: assert res_loc.is_vfp_reg() # vldr only supports imm offsets @@ -881,51 +814,6 @@ else: assert 0 - def _genop_raw_load(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - # no base offset - assert ofs.value == 0 - signed = op.getdescr().is_item_signed() - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_raw_load_i = _genop_raw_load - emit_op_raw_load_f = _genop_raw_load - - def emit_op_strlen(self, op, arglocs, regalloc, fcond): - l0, l1, res = arglocs - if l1.is_imm(): - self.mc.LDR_ri(res.value, l0.value, l1.getint(), cond=fcond) - else: - self.mc.LDR_rr(res.value, l0.value, l1.value, cond=fcond) - return fcond - - def emit_op_strgetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.LDRB_ri(res.value, r.ip.value, basesize.value, cond=fcond) - return fcond - - def emit_op_strsetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.STRB_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - return fcond - #from ../x86/regalloc.py:928 ff. def emit_op_copystrcontent(self, op, arglocs, regalloc, fcond): assert len(arglocs) == 0 @@ -1016,35 +904,6 @@ else: raise AssertionError("bad unicode item size") - emit_op_unicodelen = emit_op_strlen - - def emit_op_unicodegetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.LDR_ri(res.value, r.ip.value, basesize.value, cond=fcond) - elif scale.value == 1: - self.mc.LDRH_ri(res.value, r.ip.value, basesize.value, cond=fcond) - else: - assert 0, itemsize.value - return fcond - - def emit_op_unicodesetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.STR_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - elif scale.value == 1: - self.mc.STRH_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - else: - assert 0, itemsize.value - - return fcond - def store_force_descr(self, op, fail_locs, frame_depth): pos = self.mc.currpos() guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -34,9 +34,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.rlib.rarithmetic import r_uint from 
rpython.jit.backend.llsupport.descr import CallDescr @@ -802,15 +799,12 @@ src_locations2, dst_locations2, vfptmploc) return [] - def prepare_op_setfield_gc(self, op, fcond): + def prepare_op_gc_store(self, op, fcond): boxes = op.getarglist() - ofs, size, sign = unpack_fielddescr(op.getdescr()) - return self._prepare_op_setfield(boxes, ofs, size) - - def _prepare_op_setfield(self, boxes, ofs, size): - a0, a1 = boxes - base_loc = self.make_sure_var_in_reg(a0, boxes) - value_loc = self.make_sure_var_in_reg(a1, boxes) + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + ofs = boxes[1].getint() + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + size = abs(boxes[3].getint()) ofs_size = default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -819,19 +813,13 @@ self.assembler.load(ofs_loc, imm(ofs)) return [value_loc, base_loc, ofs_loc, imm(size)] - prepare_op_setfield_raw = prepare_op_setfield_gc - - def prepare_op_zero_ptr_field(self, op, fcond): + def _prepare_op_gc_load(self, op, fcond): a0 = op.getarg(0) ofs = op.getarg(1).getint() - return self._prepare_op_setfield([a0, ConstInt(0)], ofs, WORD) - - def _prepare_op_getfield(self, op, fcond): - a0 = op.getarg(0) - ofs, size, sign = unpack_fielddescr(op.getdescr()) + nsize = op.getarg(2).getint() # negative for "signed" base_loc = self.make_sure_var_in_reg(a0) immofs = imm(ofs) - ofs_size = default_imm_size if size < 8 else VMEM_imm_size + ofs_size = default_imm_size if abs(nsize) < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: @@ -839,17 +827,12 @@ self.assembler.load(ofs_loc, immofs) self.possibly_free_vars_for_op(op) self.free_temp_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size)] + res_loc = self.force_allocate_reg(op) + return [base_loc, ofs_loc, res_loc, imm(nsize)] - prepare_op_getfield_gc_i = _prepare_op_getfield - prepare_op_getfield_gc_r = _prepare_op_getfield - prepare_op_getfield_gc_f = _prepare_op_getfield - prepare_op_getfield_raw_i = _prepare_op_getfield - prepare_op_getfield_raw_f = _prepare_op_getfield - prepare_op_getfield_gc_pure_i = _prepare_op_getfield - prepare_op_getfield_gc_pure_r = _prepare_op_getfield - prepare_op_getfield_gc_pure_f = _prepare_op_getfield + prepare_op_gc_load_i = _prepare_op_gc_load + prepare_op_gc_load_r = _prepare_op_gc_load + prepare_op_gc_load_f = _prepare_op_gc_load def prepare_op_increment_debug_counter(self, op, fcond): boxes = op.getarglist() @@ -859,188 +842,38 @@ self.free_temp_vars() return [base_loc, value_loc] - def _prepare_op_getinteriorfield(self, op, fcond): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.make_sure_var_in_reg(op.getarg(1), args) - immofs = imm(ofs) - ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size - if check_imm_arg(ofs, size=ofs_size): - ofs_loc = immofs - else: - ofs_loc = self.get_scratch_reg(INT, args) - self.assembler.load(ofs_loc, immofs) + def prepare_op_gc_store_indexed(self, op, fcond): + boxes = op.getarglist() + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + index_loc = self.make_sure_var_in_reg(boxes[1], boxes) + assert boxes[3].getint() == 1 # scale + ofs = boxes[4].getint() + size = abs(boxes[5].getint()) + assert check_imm_arg(ofs) + return [value_loc, base_loc, index_loc, imm(size), imm(ofs)] + + 
def _prepare_op_gc_load_indexed(self, op, fcond): + boxes = op.getarglist() + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + index_loc = self.make_sure_var_in_reg(boxes[1], boxes) + assert boxes[2].getint() == 1 # scale + ofs = boxes[3].getint() + nsize = boxes[4].getint() + assert check_imm_arg(ofs) self.possibly_free_vars_for_op(op) self.free_temp_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] + res_loc = self.force_allocate_reg(op) + return [res_loc, base_loc, index_loc, imm(nsize), imm(ofs)] - prepare_op_getinteriorfield_gc_i = _prepare_op_getinteriorfield - prepare_op_getinteriorfield_gc_r = _prepare_op_getinteriorfield - prepare_op_getinteriorfield_gc_f = _prepare_op_getinteriorfield - - def prepare_op_setinteriorfield_gc(self, op, fcond): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.make_sure_var_in_reg(op.getarg(1), args) - value_loc = self.make_sure_var_in_reg(op.getarg(2), args) - immofs = imm(ofs) - ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size - if check_imm_arg(ofs, size=ofs_size): - ofs_loc = immofs - else: - ofs_loc = self.get_scratch_reg(INT, args) - self.assembler.load(ofs_loc, immofs) - return [base_loc, index_loc, value_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] - prepare_op_setinteriorfield_raw = prepare_op_setinteriorfield_gc - - def prepare_op_arraylen_gc(self, op, fcond): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - arg = op.getarg(0) - base_loc = self.make_sure_var_in_reg(arg) - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_op_setarrayitem_gc(self, op, fcond): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - scale = get_scale(size) - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(args[0], args) - value_loc = self.make_sure_var_in_reg(args[2], args) - ofs_loc = self.make_sure_var_in_reg(args[1], args) - assert check_imm_arg(ofs) - return [value_loc, base_loc, ofs_loc, imm(scale), imm(ofs)] - prepare_op_setarrayitem_raw = prepare_op_setarrayitem_gc - prepare_op_raw_store = prepare_op_setarrayitem_gc - - def _prepare_op_getarrayitem(self, op, fcond): - boxes = op.getarglist() - size, ofs, _ = unpack_arraydescr(op.getdescr()) - scale = get_scale(size) - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - assert check_imm_arg(ofs) - return [res, base_loc, ofs_loc, imm(scale), imm(ofs)] - - prepare_op_getarrayitem_gc_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_r = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_f = _prepare_op_getarrayitem - prepare_op_getarrayitem_raw_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_raw_f = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_r = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_f = _prepare_op_getarrayitem - prepare_op_raw_load_i = _prepare_op_getarrayitem - prepare_op_raw_load_f = _prepare_op_getarrayitem - - def prepare_op_strlen(self, op, fcond): - args = op.getarglist() - l0 = 
self.make_sure_var_in_reg(op.getarg(0)) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - immofs = imm(ofs_length) - if check_imm_arg(ofs_length): - l1 = immofs - else: - l1 = self.get_scratch_reg(INT, args) - self.assembler.load(l1, immofs) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - - res = self.force_allocate_reg(op) - self.possibly_free_var(op) - return [l0, l1, res] - - def prepare_op_strgetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0]) - - a1 = boxes[1] - imm_a1 = check_imm_box(a1) - if imm_a1: - ofs_loc = self.convert_to_imm(a1) - else: - ofs_loc = self.make_sure_var_in_reg(a1, boxes) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - assert itemsize == 1 - return [res, base_loc, ofs_loc, imm(basesize)] - - def prepare_op_strsetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - assert itemsize == 1 - return [value_loc, base_loc, ofs_loc, imm(basesize)] + prepare_op_gc_load_indexed_i = _prepare_op_gc_load_indexed + prepare_op_gc_load_indexed_r = _prepare_op_gc_load_indexed + prepare_op_gc_load_indexed_f = _prepare_op_gc_load_indexed prepare_op_copystrcontent = void prepare_op_copyunicodecontent = void prepare_op_zero_array = void - def prepare_op_unicodelen(self, op, fcond): - l0 = self.make_sure_var_in_reg(op.getarg(0)) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - immofs = imm(ofs_length) - if check_imm_arg(ofs_length): - l1 = immofs - else: - l1 = self.get_scratch_reg(INT, [op.getarg(0)]) - self.assembler.load(l1, immofs) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - return [l0, l1, res] - - def prepare_op_unicodegetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - scale = itemsize / 2 - return [res, base_loc, ofs_loc, - imm(scale), imm(basesize), imm(itemsize)] - - def prepare_op_unicodesetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - scale = itemsize / 2 - return [value_loc, base_loc, ofs_loc, - imm(scale), imm(basesize), imm(itemsize)] - def _prepare_op_same_as(self, op, fcond): arg = op.getarg(0) imm_arg = check_imm_box(arg) @@ -1142,8 +975,7 @@ def prepare_op_cond_call_gc_wb(self, op, fcond): # we force all arguments in a reg because it will be needed anyway by - # the following setfield_gc or setarrayitem_gc. It avoids loading it - # twice from the memory. + # the following gc_store. 
It avoids loading it twice from the memory. N = op.numargs() args = op.getarglist() arglocs = [self.make_sure_var_in_reg(op.getarg(i), args) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -29,6 +29,10 @@ float_regs = VFPRegisterManager.all_regs frame_reg = fp + # can an ISA instruction handle a factor to the offset? + # XXX should be: tuple(1 << i for i in range(31)) + load_supported_factors = (1,) + def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): AbstractLLCPU.__init__(self, rtyper, stats, opts, diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -380,6 +380,8 @@ # the call that it is no longer equal to css. See description # in translator/c/src/thread_pthread.c. + # XXX some duplicated logic here, but note that rgil.acquire() + # does more than just RPyGilAcquire() if old_rpy_fastgil == 0: # this case occurs if some other thread stole the GIL but # released it again. What occurred here is that we changed @@ -390,9 +392,8 @@ elif old_rpy_fastgil == 1: # 'rpy_fastgil' was (and still is) locked by someone else. # We need to wait for the regular mutex. - after = rffi.aroundstate.after - if after: - after() + from rpython.rlib import rgil + rgil.acquire() else: # stole the GIL from a different thread that is also # currently in an external call from the jit. Attach @@ -421,9 +422,8 @@ # 'rpy_fastgil' contains only zero or non-zero, and this is only # called when the old value stored in 'rpy_fastgil' was non-zero # (i.e. still locked, must wait with the regular mutex) - after = rffi.aroundstate.after - if after: - after() + from rpython.rlib import rgil + rgil.acquire() _REACQGIL0_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) _REACQGIL2_FUNC = lltype.Ptr(lltype.FuncType([rffi.CCHARP, lltype.Signed], diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,6 +1,6 @@ from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib.rarithmetic import ovfcheck +from rpython.rlib.rarithmetic import ovfcheck, highest_bit from rpython.rtyper.lltypesystem import llmemory, lltype, rstr from rpython.jit.metainterp import history from rpython.jit.metainterp.history import ConstInt, ConstPtr @@ -165,7 +165,14 @@ if isinstance(index_box, ConstInt): index_box = ConstInt(index_box.value * factor) else: - index_box = ResOperation(rop.INT_MUL, [index_box, ConstInt(factor)]) + # x & (x - 1) == 0 is a quick test for power of 2 + assert factor > 0 + if (factor & (factor - 1)) == 0: + index_box = ResOperation(rop.INT_LSHIFT, + [index_box, ConstInt(highest_bit(factor))]) + else: + index_box = ResOperation(rop.INT_MUL, + [index_box, ConstInt(factor)]) self.emit_op(index_box) factor = 1 # adjust the constant offset @@ -296,13 +303,6 @@ self.cpu.translate_support_code) self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2), itemsize, itemsize, basesize) - elif op.getopnum() == rop.ZERO_PTR_FIELD: - ofs = op.getarg(1).getint() - size = WORD - index_box = ConstInt(0) - value_box = ConstInt(0) - self.emit_gc_store_or_indexed(op, op.getarg(0), index_box, value_box, - size, 1, ofs) return False @@ -532,7 +532,9 @@ 
return # the ZERO_ARRAY operation will be optimized according to what # SETARRAYITEM_GC we see before the next allocation operation. - # See emit_pending_zeros(). + # See emit_pending_zeros(). (This optimization is done by + # hacking the object 'o' in-place: e.g., o.getarg(1) may be + # replaced with another constant greater than 0.) o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], descr=arraydescr) self.emit_op(o) @@ -676,7 +678,7 @@ del self.last_zero_arrays[:] self._setarrayitems_occurred.clear() # - # Then write the ZERO_PTR_FIELDs that are still pending + # Then write the NULL-pointer-writing ops that are still pending for v, d in self._delayed_zero_setfields.iteritems(): v = self.get_box_replacement(v) for ofs in d.iterkeys(): diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -17,7 +17,6 @@ from rpython.jit.backend.llsupport.test.test_regalloc_integration import BaseTestRegalloc from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.codewriter import longlong -from rpython.rlib.objectmodel import invoke_around_extcall CPU = getcpuclass() @@ -625,9 +624,6 @@ self.S = S self.cpu = cpu - def teardown_method(self, meth): - rffi.aroundstate._cleanup_() - def test_shadowstack_call(self): cpu = self.cpu cpu.gc_ll_descr.init_nursery(100) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -37,6 +37,7 @@ return ','.join([str(n) for n in [descr.itemsize, descr.basesize, size]]) + WORD = globals()['WORD'] S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) sdescr = get_size_descr(self.gc_ll_descr, S) @@ -71,6 +72,12 @@ itzdescr = get_interiorfield_descr(self.gc_ll_descr, S1I, 'z') itydescr = get_interiorfield_descr(self.gc_ll_descr, S1I, 'y') itxdescr = get_interiorfield_descr(self.gc_ll_descr, S1I, 'x') + S2I = lltype.GcArray(('x', lltype.Ptr(S1)), + ('y', lltype.Ptr(S1)), + ('z', lltype.Ptr(S1)), + ('t', lltype.Ptr(S1))) # size is a power of two + s2i_item_size_in_bits = (4 if WORD == 4 else 5) + ity2descr = get_interiorfield_descr(self.gc_ll_descr, S2I, 'y') R1 = lltype.GcStruct('R', ('x', lltype.Signed), ('y', lltype.Float), ('z', lltype.Ptr(S1))) @@ -90,7 +97,6 @@ # tiddescr = self.gc_ll_descr.fielddescr_tid wbdescr = self.gc_ll_descr.write_barrier_descr - WORD = globals()['WORD'] # F = lltype.GcArray(lltype.Float) fdescr = get_array_descr(self.gc_ll_descr, F) @@ -1224,6 +1230,13 @@ '%(itydescr.arraydescr.basesize' ' + itydescr.fielddescr.offset)d,' '%(itydescr.fielddescr.field_size)d)'], + [True, (1,2,4,8), 'i3 = setinteriorfield_gc(p0,i1,i2,descr=ity2descr)' '->' + 'i4 = int_lshift(i1,' + '%(s2i_item_size_in_bits)d);' + 'i3 = gc_store_indexed(p0,i4,i2,1,' + '%(ity2descr.arraydescr.basesize' + ' + itydescr.fielddescr.offset)d,' + '%(ity2descr.fielddescr.field_size)d)'], ]) def test_gc_load_store_transform(self, support_offset, factors, fromto): self.cpu.load_constant_offset = support_offset diff --git a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py 
@@ -1,6 +1,5 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.jit import dont_look_inside -from rpython.rlib.objectmodel import invoke_around_extcall from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib import rposix @@ -16,20 +15,10 @@ compile_kwds = dict(enable_opts=ALL_OPTS_NAMES, thread=True) def define_simple(self): - class Glob: - def __init__(self): - self.event = 0 - glob = Glob() - # - c_strchr = rffi.llexternal('strchr', [rffi.CCHARP, lltype.Signed], rffi.CCHARP) - def func(): - glob.event += 1 - def before(n, x): - invoke_around_extcall(func, func) return (n, None, None, None, None, None, None, None, None, None, None, None) # @@ -73,7 +62,8 @@ def f42(n): length = len(glob.lst) raw = alloc1() - fn = llhelper(CALLBACK, rffi._make_wrapper_for(CALLBACK, callback)) + wrapper = rffi._make_wrapper_for(CALLBACK, callback, None, True) + fn = llhelper(CALLBACK, wrapper) if n & 1: # to create a loop and a bridge, and also pass # to run the qsort() call in the blackhole interp c_qsort(rffi.cast(rffi.VOIDP, raw), rffi.cast(rffi.SIZE_T, 2), diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4971,52 +4971,6 @@ [boxfloat(12.5)], 'int') assert res == struct.unpack("I", struct.pack("f", 12.5))[0] - def test_zero_ptr_field(self): - if not isinstance(self.cpu, AbstractLLCPU): - py.test.skip("llgraph can't do zero_ptr_field") - T = lltype.GcStruct('T') - S = lltype.GcStruct('S', ('x', lltype.Ptr(T))) - tdescr = self.cpu.sizeof(T) - sdescr = self.cpu.sizeof(S) - fielddescr = self.cpu.fielddescrof(S, 'x') - loop = parse(""" - [] - p0 = new(descr=tdescr) - p1 = new(descr=sdescr) - setfield_gc(p1, p0, descr=fielddescr) - zero_ptr_field(p1, %d) - finish(p1) - """ % fielddescr.offset, namespace=locals()) - looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - deadframe = self.cpu.execute_token(looptoken) - ref = self.cpu.get_ref_value(deadframe, 0) - s = lltype.cast_opaque_ptr(lltype.Ptr(S), ref) - assert not s.x - - def test_zero_ptr_field_2(self): - if not isinstance(self.cpu, AbstractLLCPU): - py.test.skip("llgraph does not do zero_ptr_field") - - from rpython.jit.backend.llsupport import symbolic - S = lltype.GcStruct('S', ('x', lltype.Signed), - ('p', llmemory.GCREF), - ('y', lltype.Signed)) - s = lltype.malloc(S) - s.x = -1296321 - s.y = -4398176 - s_ref = lltype.cast_opaque_ptr(llmemory.GCREF, s) - s.p = s_ref - ofs_p, _ = symbolic.get_field_token(S, 'p', False) - # - self.execute_operation(rop.ZERO_PTR_FIELD, [ - InputArgRef(s_ref), ConstInt(ofs_p)], # OK for now to assume that the - 'void') # 2nd argument is a constant - # - assert s.x == -1296321 - assert s.p == lltype.nullptr(llmemory.GCREF.TO) - assert s.y == -4398176 - def test_zero_array(self): if not isinstance(self.cpu, AbstractLLCPU): py.test.skip("llgraph does not do zero_array") diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -745,7 +745,6 @@ OPERATIONS.append(GetInteriorFieldOperation(rop.GETINTERIORFIELD_GC_I)) OPERATIONS.append(GetInteriorFieldOperation(rop.GETINTERIORFIELD_GC_I)) OPERATIONS.append(SetFieldOperation(rop.SETFIELD_GC)) - 
OPERATIONS.append(ZeroPtrFieldOperation(rop.ZERO_PTR_FIELD)) OPERATIONS.append(SetInteriorFieldOperation(rop.SETINTERIORFIELD_GC)) OPERATIONS.append(NewOperation(rop.NEW)) OPERATIONS.append(NewOperation(rop.NEW_WITH_VTABLE)) diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -57,19 +57,11 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None op = ResOperation(opnum, argboxes, descr) - if opnum != rop.ZERO_PTR_FIELD: - result = _execute_arglist(self.cpu, self.fakemetainterp, - opnum, argboxes, descr) - if result is not None: - c_result = wrap_constant(result) - op.copy_value_from(c_result) - else: - import ctypes - addr = self.cpu.cast_gcref_to_int(argboxes[0].getref_base()) - offset = argboxes[1].getint() - assert (offset % ctypes.sizeof(ctypes.c_long)) == 0 - ptr = ctypes.cast(addr, ctypes.POINTER(ctypes.c_long)) - ptr[offset / ctypes.sizeof(ctypes.c_long)] = 0 + result = _execute_arglist(self.cpu, self.fakemetainterp, + opnum, argboxes, descr) + if result is not None: + c_result = wrap_constant(result) + op.copy_value_from(c_result) self.loop.operations.append(op) return op diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1039,8 +1039,7 @@ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) size_box = op.getarg(3) assert isinstance(size_box, ConstInt) - size = size_box.value - itemsize = abs(size) + size = abs(size_box.value) if size == 1: need_lower_byte = True else: @@ -1049,7 +1048,7 @@ need_lower_byte=need_lower_byte) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) self.perform_discard(op, [base_loc, ofs_loc, value_loc, - imm(itemsize)]) + imm(size)]) def consider_gc_store_indexed(self, op): args = op.getarglist() @@ -1062,8 +1061,7 @@ assert isinstance(size_box, ConstInt) factor = scale_box.value offset = offset_box.value - size = size_box.value - itemsize = abs(size) + size = abs(size_box.value) if size == 1: need_lower_byte = True else: @@ -1072,7 +1070,7 @@ need_lower_byte=need_lower_byte) ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args) self.perform_discard(op, [base_loc, ofs_loc, value_loc, - imm(factor), imm(offset), imm(itemsize)]) + imm(factor), imm(offset), imm(size)]) def consider_increment_debug_counter(self, op): base_loc = self.loc(op.getarg(0)) diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -367,7 +367,6 @@ rop.INCREMENT_DEBUG_COUNTER, rop.COND_CALL_GC_WB, rop.COND_CALL_GC_WB_ARRAY, - rop.ZERO_PTR_FIELD, rop.ZERO_ARRAY, rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -18,7 +18,6 @@ , (rop.SETINTERIORFIELD_RAW, 0, -1) , (rop.SETFIELD_GC, 0, -1) , (rop.SETFIELD_RAW, 0, -1) - , (rop.ZERO_PTR_FIELD, 0, -1) , (rop.ZERO_ARRAY, 0, -1) , (rop.STRSETITEM, 0, -1) , (rop.UNICODESETITEM, 0, -1) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1170,10 +1170,11 @@ 'GC_LOAD/3/rfi', # parameters 
GC_LOAD_INDEXED # 1: pointer to complex object - # 2: integer describing the offset + # 2: integer describing the index # 3: constant integer scale factor - # 4: constant integer offset + # 4: constant integer base offset (final offset is 'base + scale * index') # 5: constant integer. byte size of datatype to load (negative if it is signed) + # (GC_LOAD is equivalent to GC_LOAD_INDEXED with arg3==1, arg4==0) 'GC_LOAD_INDEXED/5/rfi', '_RAW_LOAD_FIRST', @@ -1204,8 +1205,9 @@ # same paramters as GC_LOAD, but one additional for the value to store # note that the itemsize is not signed! + # (gcptr, index, value, [scale, base_offset,] itemsize) 'GC_STORE/4d/n', - 'GC_STORE_INDEXED/5d/n', + 'GC_STORE_INDEXED/6d/n', 'INCREMENT_DEBUG_COUNTER/1/n', '_RAW_STORE_FIRST', @@ -1219,8 +1221,6 @@ 'SETINTERIORFIELD_GC/3d/n', 'SETINTERIORFIELD_RAW/3d/n', # right now, only used by tests 'SETFIELD_GC/2d/n', - 'ZERO_PTR_FIELD/2/n', # only emitted by the rewrite, clears a pointer field - # at a given constant offset, no descr 'ZERO_ARRAY/3d/n', # only emitted by the rewrite, clears (part of) an array # [arraygcptr, firstindex, length], descr=ArrayDescr 'SETFIELD_RAW/2d/n', diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4044,7 +4044,7 @@ self.interp_operations(f, []) def test_external_call(self): - from rpython.rlib.objectmodel import invoke_around_extcall + from rpython.rlib import rgil TIME_T = lltype.Signed # ^^^ some 32-bit platforms have a 64-bit rffi.TIME_T, but we @@ -4058,11 +4058,6 @@ pass state = State() - def before(): - if we_are_jitted(): - raise Oups - state.l.append("before") - def after(): if we_are_jitted(): raise Oups @@ -4070,14 +4065,14 @@ def f(): state.l = [] - invoke_around_extcall(before, after) + rgil.invoke_after_thread_switch(after) external(lltype.nullptr(T.TO)) return len(state.l) res = self.interp_operations(f, []) - assert res == 2 + assert res == 1 res = self.interp_operations(f, []) - assert res == 2 + assert res == 1 self.check_operations_history(call_release_gil_i=1, call_may_force_i=0) def test_unescaped_write_zero(self): diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1124,8 +1124,8 @@ resultvar=op.result) def gct_gc_thread_run(self, hop): - assert self.translator.config.translation.thread - if hasattr(self.root_walker, 'thread_run_ptr'): + if (self.translator.config.translation.thread and + hasattr(self.root_walker, 'thread_run_ptr')): livevars = self.push_roots(hop) assert not livevars, "live GC var around %s!" % (hop.spaceop,) hop.genop("direct_call", [self.root_walker.thread_run_ptr]) diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py --- a/rpython/memory/gcwrapper.py +++ b/rpython/memory/gcwrapper.py @@ -184,6 +184,9 @@ hdr.tid |= self.gc.gcflag_extra return (hdr.tid & self.gc.gcflag_extra) != 0 + def thread_run(self): + pass + # ____________________________________________________________ class LLInterpRootWalker: diff --git a/rpython/rlib/_rposix_repr.py b/rpython/rlib/_rposix_repr.py deleted file mode 100644 --- a/rpython/rlib/_rposix_repr.py +++ /dev/null @@ -1,122 +0,0 @@ -""" -RTyping support for os.stat_result objects. -They are rtyped just like a tuple of the correct length supporting -only indexing and the st_xxx attributes. 
We need a custom StatResultRepr -because when rtyping for LL backends we have extra platform-dependent -items at the end of the tuple, but for OO backends we only want the -portable items. This allows the OO backends to assume a fixed shape for -the tuples returned by os.stat(). -""" -from rpython.annotator import model as annmodel -from rpython.rtyper.llannotation import lltype_to_annotation -from rpython.flowspace.model import Constant -from rpython.flowspace.operation import op -from rpython.tool.pairtype import pairtype -from rpython.rtyper.rmodel import Repr -from rpython.rtyper.rint import IntegerRepr -from rpython.rtyper.error import TyperError -from rpython.rlib import rposix_stat - - -class StatResultRepr(Repr): - - def __init__(self, rtyper): - self.rtyper = rtyper - self.stat_fields = rposix_stat.STAT_FIELDS - - self.stat_field_indexes = {} - for i, (name, TYPE) in enumerate(self.stat_fields): - self.stat_field_indexes[name] = i - - self.s_tuple = annmodel.SomeTuple([lltype_to_annotation(TYPE) - for name, TYPE in self.stat_fields]) - self.r_tuple = rtyper.getrepr(self.s_tuple) - self.lowleveltype = self.r_tuple.lowleveltype - - def redispatch_getfield(self, hop, index): - rtyper = self.rtyper - s_index = rtyper.annotator.bookkeeper.immutablevalue(index) - hop2 = hop.copy() - spaceop = op.getitem(hop.args_v[0], Constant(index)) - spaceop.result = hop.spaceop.result - hop2.spaceop = spaceop - hop2.args_v = spaceop.args - hop2.args_s = [self.s_tuple, s_index] - hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] - return hop2.dispatch() - - def rtype_getattr(self, hop): - s_attr = hop.args_s[1] - attr = s_attr.const - try: - index = self.stat_field_indexes[attr] - except KeyError: - raise TyperError("os.stat().%s: field not available" % (attr,)) - return self.redispatch_getfield(hop, index) - - -class __extend__(pairtype(StatResultRepr, IntegerRepr)): - - def rtype_getitem((r_sta, r_int), hop): - s_int = hop.args_s[1] - index = s_int.const - return r_sta.redispatch_getfield(hop, index) - - -def specialize_make_stat_result(hop): - r_StatResult = hop.rtyper.getrepr(rposix_stat.s_StatResult) - [v_result] = hop.inputargs(r_StatResult.r_tuple) - # no-op conversion from r_StatResult.r_tuple to r_StatResult - hop.exception_cannot_occur() - return v_result - - -class StatvfsResultRepr(Repr): - - def __init__(self, rtyper): - self.rtyper = rtyper - self.statvfs_fields = rposix_stat.STATVFS_FIELDS - - self.statvfs_field_indexes = {} - for i, (name, TYPE) in enumerate(self.statvfs_fields): - self.statvfs_field_indexes[name] = i - - self.s_tuple = annmodel.SomeTuple([lltype_to_annotation(TYPE) - for name, TYPE in self.statvfs_fields]) - self.r_tuple = rtyper.getrepr(self.s_tuple) - self.lowleveltype = self.r_tuple.lowleveltype - - def redispatch_getfield(self, hop, index): - rtyper = self.rtyper - s_index = rtyper.annotator.bookkeeper.immutablevalue(index) - hop2 = hop.copy() - spaceop = op.getitem(hop.args_v[0], Constant(index)) - spaceop.result = hop.spaceop.result - hop2.spaceop = spaceop - hop2.args_v = spaceop.args - hop2.args_s = [self.s_tuple, s_index] - hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)] - return hop2.dispatch() - - def rtype_getattr(self, hop): - s_attr = hop.args_s[1] - attr = s_attr.const - try: - index = self.statvfs_field_indexes[attr] - except KeyError: - raise TyperError("os.statvfs().%s: field not available" % (attr,)) - return self.redispatch_getfield(hop, index) - - -class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)): - def rtype_getitem((r_sta, 
r_int), hop): - s_int = hop.args_s[1] - index = s_int.const - return r_sta.redispatch_getfield(hop, index) - - -def specialize_make_statvfs_result(hop): - r_StatvfsResult = hop.rtyper.getrepr(rposix_stat.s_StatvfsResult) - [v_result] = hop.inputargs(r_StatvfsResult.r_tuple) - hop.exception_cannot_occur() - return v_result diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -56,10 +56,11 @@ """ def deco(func): source = py.code.Source(""" + from rpython.rlib import rgil + def wrapper(%(args)s): # acquire the GIL - after = rffi.aroundstate.after - if after: after() + rgil.acquire() # rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -78,8 +79,7 @@ assert 0 # dead code rffi.stackcounter.stacks_counter -= 1 # release the GIL - before = rffi.aroundstate.before - if before: before() + rgil.release() # return res """ % {'args': ', '.join(['arg%d' % i for i in range(len(argtypes))])}) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -599,22 +599,10 @@ From pypy.commits at gmail.com Tue Dec 22 14:39:10 2015 From: pypy.commits at gmail.com (sbauman) Date: Tue, 22 Dec 2015 11:39:10 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Initial merge attempt Message-ID: <5679a6de.c6ecc20a.ff6b1.4030@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81424:bd02a18f4c31 Date: 2015-12-22 13:04 -0500 http://bitbucket.org/pypy/pypy/changeset/bd02a18f4c31/ Log: Initial merge attempt diff too long, truncating to 2000 out of 24251 lines diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ -all: pypy-c +all: pypy-c cffi_imports PYPY_EXECUTABLE := $(shell which pypy) URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") @@ -10,6 +10,8 @@ RUNINTERP = $(PYPY_EXECUTABLE) endif +.PHONY: cffi_imports + pypy-c: @echo @echo "====================================================================" @@ -36,3 +38,6 @@ # replaced with an opaque --jobserver option by the time this Makefile # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html + +cffi_imports: + PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -18,9 +18,9 @@ assert '__pypy__' not in _sys.builtin_module_names newdict = lambda _ : {} try: - from __pypy__ import reversed_dict + from __pypy__ import reversed_dict as _reversed_dict except ImportError: - reversed_dict = lambda d: reversed(d.keys()) + _reversed_dict = None # don't have ordered dicts try: from thread import get_ident as _get_ident @@ -46,7 +46,7 @@ ''' def __reversed__(self): - return reversed_dict(self) + return _reversed_dict(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. @@ -116,6 +116,178 @@ return ItemsView(self) +def _compat_with_unordered_dicts(): + # This returns the methods needed in OrderedDict in case the base + # 'dict' class is not actually ordered, like on top of CPython or + # old PyPy or PyPy-STM. + + # ===== Original comments and code follows ===== + # ===== The unmodified methods are not repeated ===== + + # An inherited dict maps keys to values. 
+ # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + return dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, _ = self.__map.pop(key) + link_prev[1] = link_next # update link_prev[NEXT] + link_next[0] = link_prev # update link_next[PREV] + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root[1] # start at the first node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[1] # move to next node + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root[0] # start at the last node + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[0] # move to previous node + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + dict.clear(self) + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) pairs in od' + for k in self: + yield (k, self[k]) + + update = MutableMapping.update + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. 
If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + key = next(reversed(self) if last else iter(self)) + value = self.pop(key) + return key, value + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + return locals() + +if _reversed_dict is None: + for _key, _value in _compat_with_unordered_dicts().items(): + setattr(OrderedDict, _key, _value) + del _key, _value + ################################################################################ ### namedtuple ################################################################################ diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -8,13 +8,13 @@ def __init__(self): self._builder = StringBuilder() def append(self, string): - try: - self._builder.append(string) - except UnicodeEncodeError: + if (isinstance(string, unicode) and + type(self._builder) is StringBuilder): ub = UnicodeBuilder() ub.append(self._builder.build()) self._builder = ub - ub.append(string) + self.append = ub.append # shortcut only + self._builder.append(string) def build(self): return self._builder.build() diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -604,21 +604,8 @@ def uuid4(): """Generate a random UUID.""" - - # When the system provides a version-4 UUID generator, use it. - if _uuid_generate_random: - _buffer = ctypes.create_string_buffer(16) - _uuid_generate_random(_buffer) - return UUID(bytes=_buffer.raw) - - # Otherwise, get randomness from urandom or the 'random' module. - try: - import os - return UUID(bytes=os.urandom(16), version=4) - except: - import random - bytes = [chr(random.randrange(256)) for i in range(16)] - return UUID(bytes=bytes, version=4) + import os + return UUID(bytes=os.urandom(16), version=4) def uuid5(namespace, name): """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.3.1 +Version: 1.4.1 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.3.1" -__version_info__ = (1, 3, 1) +__version__ = "1.4.1" +__version_info__ = (1, 4, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -146,7 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) -#define _CFFI_NUM_EXPORTS 25 +#define _cffi_call_python \ + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) +#define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -201,8 +203,11 @@ the others follow */ } +/********** end CPython-specific section **********/ +#else +_CFFI_UNUSED_FN +static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); #endif -/********** end CPython-specific section **********/ #define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0])) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -72,6 +72,8 @@ self._cdefsources = [] self._included_ffis = [] self._windows_unicode = None + self._init_once_cache = {} + self._cdef_version = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -104,6 +106,7 @@ raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: + self._cdef_version = object() self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: @@ -589,14 +592,39 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.'): + def compile(self, tmpdir='.', verbose=0): from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, **kwds) + source_extension=source_extension, + compiler_verbose=verbose, **kwds) + + def init_once(self, func, tag): + # Read _init_once_cache[tag], which is either (False, lock) if + # we're calling the function now in some thread, or (True, result). + # Don't call setdefault() in most cases, to avoid allocating and + # immediately freeing a lock; but still use setdefaut() to avoid + # races. + try: + x = self._init_once_cache[tag] + except KeyError: + x = self._init_once_cache.setdefault(tag, (False, allocate_lock())) + # Common case: we got (True, result), so we return the result. + if x[0]: + return x[1] + # Else, it's a lock. Acquire it to serialize the following tests. + with x[1]: + # Read again from _init_once_cache the current status. + x = self._init_once_cache[tag] + if x[0]: + return x[1] + # Call the function and store the result back. 
+ result = func() + self._init_once_cache[tag] = (True, result) + return result def _load_backend_lib(backend, name, flags): @@ -620,70 +648,70 @@ import os backend = ffi._backend backendlib = _load_backend_lib(backend, libname, flags) - copied_enums = [] # - def make_accessor_locked(name): + def accessor_function(name): key = 'function ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - try: - value = backendlib.load_function(BType, name) - except KeyError as e: - raise AttributeError('%s: %s' % (name, e)) - library.__dict__[name] = value + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + try: + value = backendlib.load_function(BType, name) + except KeyError as e: + raise AttributeError('%s: %s' % (name, e)) + library.__dict__[name] = value + # + def accessor_variable(name): + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + read_variable = backendlib.read_variable + write_variable = backendlib.write_variable + setattr(FFILibrary, name, property( + lambda self: read_variable(BType, name), + lambda self, value: write_variable(BType, name, value))) + # + def accessor_constant(name): + raise NotImplementedError("non-integer constant '%s' cannot be " + "accessed from a dlopen() library" % (name,)) + # + def accessor_int_constant(name): + library.__dict__[name] = ffi._parser._int_constants[name] + # + accessors = {} + accessors_version = [False] + # + def update_accessors(): + if accessors_version[0] is ffi._cdef_version: return # - key = 'variable ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - read_variable = backendlib.read_variable - write_variable = backendlib.write_variable - setattr(FFILibrary, name, property( - lambda self: read_variable(BType, name), - lambda self, value: write_variable(BType, name, value))) - return - # - if not copied_enums: - from . import model - error = None - for key, (tp, _) in ffi._parser._declarations.items(): - if not isinstance(tp, model.EnumType): - continue - try: - tp.check_not_partial() - except Exception as e: - error = e - continue - for enumname, enumval in zip(tp.enumerators, tp.enumvalues): - if enumname not in library.__dict__: - library.__dict__[enumname] = enumval - if error is not None: - if name in library.__dict__: - return # ignore error, about a different enum - raise error - - for key, val in ffi._parser._int_constants.items(): - if key not in library.__dict__: - library.__dict__[key] = val - - copied_enums.append(True) - if name in library.__dict__: - return - # - key = 'constant ' + name - if key in ffi._parser._declarations: - raise NotImplementedError("fetching a non-integer constant " - "after dlopen()") - # - raise AttributeError(name) + from . 
import model + for key, (tp, _) in ffi._parser._declarations.items(): + if not isinstance(tp, model.EnumType): + tag, name = key.split(' ', 1) + if tag == 'function': + accessors[name] = accessor_function + elif tag == 'variable': + accessors[name] = accessor_variable + elif tag == 'constant': + accessors[name] = accessor_constant + else: + for i, enumname in enumerate(tp.enumerators): + def accessor_enum(name, tp=tp, i=i): + tp.check_not_partial() + library.__dict__[name] = tp.enumvalues[i] + accessors[enumname] = accessor_enum + for name in ffi._parser._int_constants: + accessors.setdefault(name, accessor_int_constant) + accessors_version[0] = ffi._cdef_version # def make_accessor(name): with ffi._lock: if name in library.__dict__ or name in FFILibrary.__dict__: return # added by another thread while waiting for the lock - make_accessor_locked(name) + if name not in accessors: + update_accessors() + if name not in accessors: + raise AttributeError(name) + accessors[name](name) # class FFILibrary(object): def __getattr__(self, name): @@ -697,6 +725,10 @@ setattr(self, name, value) else: property.__set__(self, value) + def __dir__(self): + with ffi._lock: + update_accessors() + return accessors.keys() # if libname is not None: try: diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py --- a/lib_pypy/cffi/cffi_opcode.py +++ b/lib_pypy/cffi/cffi_opcode.py @@ -54,6 +54,7 @@ OP_DLOPEN_FUNC = 35 OP_DLOPEN_CONST = 37 OP_GLOBAL_VAR_F = 39 +OP_EXTERN_PYTHON = 41 PRIM_VOID = 0 PRIM_BOOL = 1 diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -29,6 +29,7 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") +_r_extern_python = re.compile(r'\bextern\s*"Python"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") @@ -80,6 +81,47 @@ parts.append(csource) return ''.join(parts) +def _preprocess_extern_python(csource): + # input: `extern "Python" int foo(int);` or + # `extern "Python" { int foo(int); }` + # output: + # void __cffi_extern_python_start; + # int foo(int); + # void __cffi_extern_python_stop; + parts = [] + while True: + match = _r_extern_python.search(csource) + if not match: + break + endpos = match.end() - 1 + #print + #print ''.join(parts)+csource + #print '=>' + parts.append(csource[:match.start()]) + parts.append('void __cffi_extern_python_start; ') + if csource[endpos] == '{': + # grouping variant + closing = csource.find('}', endpos) + if closing < 0: + raise api.CDefError("'extern \"Python\" {': no '}' found") + if csource.find('{', endpos + 1, closing) >= 0: + raise NotImplementedError("cannot use { } inside a block " + "'extern \"Python\" { ... }'") + parts.append(csource[endpos+1:closing]) + csource = csource[closing+1:] + else: + # non-grouping variant + semicolon = csource.find(';', endpos) + if semicolon < 0: + raise api.CDefError("'extern \"Python\": no ';' found") + parts.append(csource[endpos:semicolon+1]) + csource = csource[semicolon+1:] + parts.append(' void __cffi_extern_python_stop;') + #print ''.join(parts)+csource + #print + parts.append(csource) + return ''.join(parts) + def _preprocess(csource): # Remove comments. NOTE: this only work because the cdef() section # should not contain any string literal! 
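The hunk above adds ``_preprocess_extern_python()``. A minimal sketch of what it does, based on its docstring (the helper is private to ``cffi.cparser`` and present from CFFI 1.4 on, so treat this as illustration rather than a supported API)::

    # Illustrative only: exercises the private helper shown in the hunk above.
    from cffi import cparser

    csource = 'extern "Python" int foo(int); extern "Python" { int bar(int); }'
    print(cparser._preprocess_extern_python(csource))
    # Per the docstring, each extern "Python" declaration comes back wrapped
    # between 'void __cffi_extern_python_start;' and
    # 'void __cffi_extern_python_stop;' markers.  The _parse_decl() changes in
    # the following hunks toggle self._inside_extern_python on these markers
    # and record the enclosed functions as 'extern_python ...' instead of
    # 'function ...'.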
@@ -103,8 +145,13 @@ csource = _r_stdcall2.sub(' volatile volatile const(', csource) csource = _r_stdcall1.sub(' volatile volatile const ', csource) csource = _r_cdecl.sub(' ', csource) + # + # Replace `extern "Python"` with start/end markers + csource = _preprocess_extern_python(csource) + # # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) + # # Replace "...}" with "__dotdotdotNUM__}". This construction should # occur only at the end of enums; at the end of structs we have "...;}" # and at the end of vararg functions "...);". Also replace "=...[,}]" @@ -257,6 +304,7 @@ break # try: + self._inside_extern_python = False for decl in iterator: if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) @@ -326,13 +374,19 @@ ' #define %s %s' % (key, key, key, value)) + def _declare_function(self, tp, quals, decl): + tp = self._get_type_pointer(tp, quals) + if self._inside_extern_python: + self._declare('extern_python ' + decl.name, tp) + else: + self._declare('function ' + decl.name, tp) + def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp, quals) - self._declare('function ' + decl.name, tp) + self._declare_function(tp, quals, decl) else: if isinstance(node, pycparser.c_ast.Struct): self._get_struct_union_enum_type('struct', node) @@ -348,8 +402,7 @@ tp, quals = self._get_type_and_quals(node, partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp, quals) - self._declare('function ' + decl.name, tp) + self._declare_function(tp, quals, decl) elif (tp.is_integer_type() and hasattr(decl, 'init') and hasattr(decl.init, 'value') and @@ -362,10 +415,23 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif (quals & model.Q_CONST) and not tp.is_array_type: - self._declare('constant ' + decl.name, tp, quals=quals) + elif (tp is model.void_type and + decl.name.startswith('__cffi_extern_python_')): + # hack: `extern "Python"` in the C source is replaced + # with "void __cffi_extern_python_start;" and + # "void __cffi_extern_python_stop;" + self._inside_extern_python = not self._inside_extern_python + assert self._inside_extern_python == ( + decl.name == '__cffi_extern_python_start') else: - self._declare('variable ' + decl.name, tp, quals=quals) + if self._inside_extern_python: + raise api.CDefError( + "cannot declare constants or " + "variables with 'extern \"Python\"'") + if (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) + else: + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): return self.parse_type_and_quals(cdecl)[0] diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -17,15 +17,16 @@ def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] - allsources.extend(sources) + for src in sources: + allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext): +def compile(tmpdir, ext, compiler_verbose=0): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext) + outputfilename = 
_build(tmpdir, ext, compiler_verbose) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -35,10 +36,10 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext): +def _build(tmpdir, ext, compiler_verbose=0): # XXX compact but horrible :-( from distutils.core import Distribution - import distutils.errors + import distutils.errors, distutils.log # dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() @@ -48,7 +49,12 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: - dist.run_command('build_ext') + old_level = distutils.log.set_threshold(0) or 0 + try: + distutils.log.set_verbosity(compiler_verbose) + dist.run_command('build_ext') + finally: + distutils.log.set_threshold(old_level) except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -1,5 +1,6 @@ -/* See doc/misc/parse_c_type.rst in the source of CFFI for more information */ +/* This part is from file 'cffi/parse_c_type.h'. It is copied at the + beginning of C sources generated by CFFI's ffi.set_source(). */ typedef void *_cffi_opcode_t; @@ -27,6 +28,7 @@ #define _CFFI_OP_DLOPEN_FUNC 35 #define _CFFI_OP_DLOPEN_CONST 37 #define _CFFI_OP_GLOBAL_VAR_F 39 +#define _CFFI_OP_EXTERN_PYTHON 41 #define _CFFI_PRIM_VOID 0 #define _CFFI_PRIM_BOOL 1 @@ -160,6 +162,12 @@ const char *error_message; }; +struct _cffi_externpy_s { + const char *name; + size_t size_of_result; + void *reserved1, *reserved2; +}; + #ifdef _CFFI_INTERNAL static int parse_c_type(struct _cffi_parse_info_s *info, const char *input); static int search_in_globals(const struct _cffi_type_context_s *ctx, diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -118,6 +118,7 @@ class Recompiler: + _num_externpy = 0 def __init__(self, ffi, module_name, target_is_python=False): self.ffi = ffi @@ -356,7 +357,10 @@ else: prnt(' NULL, /* no includes */') prnt(' %d, /* num_types */' % (len(self.cffi_types),)) - prnt(' 0, /* flags */') + flags = 0 + if self._num_externpy: + flags |= 1 # set to mean that we use extern "Python" + prnt(' %d, /* flags */' % flags) prnt('};') prnt() # @@ -366,6 +370,11 @@ prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') + if self._num_externpy: + prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') + prnt(' _cffi_call_python = ' + '(void(*)(struct _cffi_externpy_s *, char *))p[1];') + prnt(' }') prnt(' p[0] = (const void *)%s;' % VERSION) prnt(' p[1] = &_cffi_type_context;') prnt('}') @@ -1108,6 +1117,75 @@ GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index))) # ---------- + # extern "Python" + + def _generate_cpy_extern_python_collecttype(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + self._do_collect_type(tp) + + def _generate_cpy_extern_python_decl(self, tp, name): + prnt = self._prnt + if isinstance(tp.result, model.VoidType): + size_of_result = '0' + else: + context = 'result of %s' % name + size_of_result = '(int)sizeof(%s)' % ( + tp.result.get_c_name('', context),) + prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) + prnt(' { "%s", %s };' % (name, size_of_result)) + prnt() + # + arguments = [] + context = 'argument of %s' % name + for i, type in 
enumerate(tp.args): + arg = type.get_c_name(' a%d' % i, context) + arguments.append(arg) + # + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s(%s)' % (name, repr_arguments) + # + def may_need_128_bits(tp): + return (isinstance(tp, model.PrimitiveType) and + tp.name == 'long double') + # + size_of_a = max(len(tp.args)*8, 8) + if may_need_128_bits(tp.result): + size_of_a = max(size_of_a, 16) + if isinstance(tp.result, model.StructOrUnion): + size_of_a = 'sizeof(%s) > %d ? sizeof(%s) : %d' % ( + tp.result.get_c_name(''), size_of_a, + tp.result.get_c_name(''), size_of_a) + prnt('static %s' % tp.result.get_c_name(name_and_arguments)) + prnt('{') + prnt(' char a[%s];' % size_of_a) + prnt(' char *p = a;') + for i, type in enumerate(tp.args): + arg = 'a%d' % i + if (isinstance(type, model.StructOrUnion) or + may_need_128_bits(type)): + arg = '&' + arg + type = model.PointerType(type) + prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) + prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) + if not isinstance(tp.result, model.VoidType): + prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) + prnt('}') + prnt() + self._num_externpy += 1 + + def _generate_cpy_extern_python_ctx(self, tp, name): + if self.target_is_python: + raise ffiplatform.VerificationError( + "cannot use 'extern \"Python\"' in the ABI mode") + if tp.ellipsis: + raise NotImplementedError("a vararg function is extern \"Python\"") + type_index = self._typesdict[tp] + type_op = CffiOp(OP_EXTERN_PYTHON, type_index) + self._lsts["global"].append( + GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + + # ---------- # emitting the opcodes for individual types def _emit_bytecode_VoidType(self, tp, index): @@ -1232,7 +1310,8 @@ return os.path.join(outputdir, *parts), parts def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, - c_file=None, source_extension='.c', extradir=None, **kwds): + c_file=None, source_extension='.c', extradir=None, + compiler_verbose=1, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1252,7 +1331,7 @@ cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose) finally: os.chdir(cwd) return outputfilename diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -21,6 +21,8 @@ import math as _math import struct as _struct +_SENTINEL = object() + def _cmp(x, y): return 0 if x == y else 1 if x > y else -1 @@ -31,6 +33,8 @@ MAXYEAR = 9999 _MINYEARFMT = 1900 +_MAX_DELTA_DAYS = 999999999 + # Utility functions, adapted from Python's Demo/classes/Dates.py, which # also assumes the current Gregorian calendar indefinitely extended in # both directions. Difference: Dates.py calls January 1 of year 0 day @@ -95,6 +99,15 @@ # pasting together 25 4-year cycles. assert _DI100Y == 25 * _DI4Y - 1 +_US_PER_US = 1 +_US_PER_MS = 1000 +_US_PER_SECOND = 1000000 +_US_PER_MINUTE = 60000000 +_SECONDS_PER_DAY = 24 * 3600 +_US_PER_HOUR = 3600000000 +_US_PER_DAY = 86400000000 +_US_PER_WEEK = 604800000000 + def _ord2ymd(n): "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1." 
@@ -271,15 +284,17 @@ def _check_int_field(value): if isinstance(value, int): - return value + return int(value) if not isinstance(value, float): try: value = value.__int__() except AttributeError: pass else: - if isinstance(value, (int, long)): - return value + if isinstance(value, int): + return int(value) + elif isinstance(value, long): + return int(long(value)) raise TypeError('__int__ method should return an integer') raise TypeError('an integer is required') raise TypeError('integer argument expected, got float') @@ -344,75 +359,79 @@ raise TypeError("can't compare '%s' to '%s'" % ( type(x).__name__, type(y).__name__)) -# This is a start at a struct tm workalike. Goals: -# -# + Works the same way across platforms. -# + Handles all the fields datetime needs handled, without 1970-2038 glitches. -# -# Note: I suspect it's best if this flavor of tm does *not* try to -# second-guess timezones or DST. Instead fold whatever adjustments you want -# into the minutes argument (and the constructor will normalize). +def _normalize_pair(hi, lo, factor): + if not 0 <= lo <= factor-1: + inc, lo = divmod(lo, factor) + hi += inc + return hi, lo -class _tmxxx: +def _normalize_datetime(y, m, d, hh, mm, ss, us, ignore_overflow=False): + # Normalize all the inputs, and store the normalized values. + ss, us = _normalize_pair(ss, us, 1000000) + mm, ss = _normalize_pair(mm, ss, 60) + hh, mm = _normalize_pair(hh, mm, 60) + d, hh = _normalize_pair(d, hh, 24) + y, m, d = _normalize_date(y, m, d, ignore_overflow) + return y, m, d, hh, mm, ss, us - ordinal = None +def _normalize_date(year, month, day, ignore_overflow=False): + # That was easy. Now it gets muddy: the proper range for day + # can't be determined without knowing the correct month and year, + # but if day is, e.g., plus or minus a million, the current month + # and year values make no sense (and may also be out of bounds + # themselves). + # Saying 12 months == 1 year should be non-controversial. + if not 1 <= month <= 12: + year, month = _normalize_pair(year, month-1, 12) + month += 1 + assert 1 <= month <= 12 - def __init__(self, year, month, day, hour=0, minute=0, second=0, - microsecond=0): - # Normalize all the inputs, and store the normalized values. - if not 0 <= microsecond <= 999999: - carry, microsecond = divmod(microsecond, 1000000) - second += carry - if not 0 <= second <= 59: - carry, second = divmod(second, 60) - minute += carry - if not 0 <= minute <= 59: - carry, minute = divmod(minute, 60) - hour += carry - if not 0 <= hour <= 23: - carry, hour = divmod(hour, 24) - day += carry + # Now only day can be out of bounds (year may also be out of bounds + # for a datetime object, but we don't care about that here). + # If day is out of bounds, what to do is arguable, but at least the + # method here is principled and explainable. + dim = _days_in_month(year, month) + if not 1 <= day <= dim: + # Move day-1 days from the first of the month. First try to + # get off cheap if we're only one day out of range (adjustments + # for timezone alone can't be worse than that). + if day == 0: # move back a day + month -= 1 + if month > 0: + day = _days_in_month(year, month) + else: + year, month, day = year-1, 12, 31 + elif day == dim + 1: # move forward a day + month += 1 + day = 1 + if month > 12: + month = 1 + year += 1 + else: + ordinal = _ymd2ord(year, month, 1) + (day - 1) + year, month, day = _ord2ymd(ordinal) - # That was easy. 
Now it gets muddy: the proper range for day - # can't be determined without knowing the correct month and year, - # but if day is, e.g., plus or minus a million, the current month - # and year values make no sense (and may also be out of bounds - # themselves). - # Saying 12 months == 1 year should be non-controversial. - if not 1 <= month <= 12: - carry, month = divmod(month-1, 12) - year += carry - month += 1 - assert 1 <= month <= 12 + if not ignore_overflow and not MINYEAR <= year <= MAXYEAR: + raise OverflowError("date value out of range") + return year, month, day - # Now only day can be out of bounds (year may also be out of bounds - # for a datetime object, but we don't care about that here). - # If day is out of bounds, what to do is arguable, but at least the - # method here is principled and explainable. - dim = _days_in_month(year, month) - if not 1 <= day <= dim: - # Move day-1 days from the first of the month. First try to - # get off cheap if we're only one day out of range (adjustments - # for timezone alone can't be worse than that). - if day == 0: # move back a day - month -= 1 - if month > 0: - day = _days_in_month(year, month) - else: - year, month, day = year-1, 12, 31 - elif day == dim + 1: # move forward a day - month += 1 - day = 1 - if month > 12: - month = 1 - year += 1 - else: - self.ordinal = _ymd2ord(year, month, 1) + (day - 1) - year, month, day = _ord2ymd(self.ordinal) - - self.year, self.month, self.day = year, month, day - self.hour, self.minute, self.second = hour, minute, second - self.microsecond = microsecond +def _accum(tag, sofar, num, factor, leftover): + if isinstance(num, (int, long)): + prod = num * factor + rsum = sofar + prod + return rsum, leftover + if isinstance(num, float): + fracpart, intpart = _math.modf(num) + prod = int(intpart) * factor + rsum = sofar + prod + if fracpart == 0.0: + return rsum, leftover + assert isinstance(factor, (int, long)) + fracpart, intpart = _math.modf(factor * fracpart) + rsum += int(intpart) + return rsum, leftover + fracpart + raise TypeError("unsupported type for timedelta %s component: %s" % + (tag, type(num))) class timedelta(object): """Represent the difference between two datetime objects. @@ -433,100 +452,42 @@ """ __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' - def __new__(cls, days=0, seconds=0, microseconds=0, - milliseconds=0, minutes=0, hours=0, weeks=0): - # Doing this efficiently and accurately in C is going to be difficult - # and error-prone, due to ubiquitous overflow possibilities, and that - # C double doesn't have enough bits of precision to represent - # microseconds over 10K years faithfully. The code here tries to make - # explicit where go-fast assumptions can be relied on, in order to - # guide the C implementation; it's way more convoluted than speed- - # ignoring auto-overflow-to-long idiomatic Python could be. 
+ def __new__(cls, days=_SENTINEL, seconds=_SENTINEL, microseconds=_SENTINEL, + milliseconds=_SENTINEL, minutes=_SENTINEL, hours=_SENTINEL, weeks=_SENTINEL): + x = 0 + leftover = 0.0 + if microseconds is not _SENTINEL: + x, leftover = _accum("microseconds", x, microseconds, _US_PER_US, leftover) + if milliseconds is not _SENTINEL: + x, leftover = _accum("milliseconds", x, milliseconds, _US_PER_MS, leftover) + if seconds is not _SENTINEL: + x, leftover = _accum("seconds", x, seconds, _US_PER_SECOND, leftover) + if minutes is not _SENTINEL: + x, leftover = _accum("minutes", x, minutes, _US_PER_MINUTE, leftover) + if hours is not _SENTINEL: + x, leftover = _accum("hours", x, hours, _US_PER_HOUR, leftover) + if days is not _SENTINEL: + x, leftover = _accum("days", x, days, _US_PER_DAY, leftover) + if weeks is not _SENTINEL: + x, leftover = _accum("weeks", x, weeks, _US_PER_WEEK, leftover) + if leftover != 0.0: + x += _round(leftover) + return cls._from_microseconds(x) - # XXX Check that all inputs are ints, longs or floats. + @classmethod + def _from_microseconds(cls, us): + s, us = divmod(us, _US_PER_SECOND) + d, s = divmod(s, _SECONDS_PER_DAY) + return cls._create(d, s, us, False) - # Final values, all integer. - # s and us fit in 32-bit signed ints; d isn't bounded. - d = s = us = 0 + @classmethod + def _create(cls, d, s, us, normalize): + if normalize: + s, us = _normalize_pair(s, us, 1000000) + d, s = _normalize_pair(d, s, 24*3600) - # Normalize everything to days, seconds, microseconds. - days += weeks*7 - seconds += minutes*60 + hours*3600 - microseconds += milliseconds*1000 - - # Get rid of all fractions, and normalize s and us. - # Take a deep breath . - if isinstance(days, float): - dayfrac, days = _math.modf(days) - daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.)) - assert daysecondswhole == int(daysecondswhole) # can't overflow - s = int(daysecondswhole) - assert days == int(days) - d = int(days) - else: - daysecondsfrac = 0.0 - d = days - assert isinstance(daysecondsfrac, float) - assert abs(daysecondsfrac) <= 1.0 - assert isinstance(d, (int, long)) - assert abs(s) <= 24 * 3600 - # days isn't referenced again before redefinition - - if isinstance(seconds, float): - secondsfrac, seconds = _math.modf(seconds) - assert seconds == int(seconds) - seconds = int(seconds) - secondsfrac += daysecondsfrac - assert abs(secondsfrac) <= 2.0 - else: - secondsfrac = daysecondsfrac - # daysecondsfrac isn't referenced again - assert isinstance(secondsfrac, float) - assert abs(secondsfrac) <= 2.0 - - assert isinstance(seconds, (int, long)) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) # can't overflow - assert isinstance(s, int) - assert abs(s) <= 2 * 24 * 3600 - # seconds isn't referenced again before redefinition - - usdouble = secondsfrac * 1e6 - assert abs(usdouble) < 2.1e6 # exact value not critical - # secondsfrac isn't referenced again - - if isinstance(microseconds, float): - microseconds = _round(microseconds + usdouble) - seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) - microseconds = int(microseconds) - else: - microseconds = int(microseconds) - seconds, microseconds = divmod(microseconds, 1000000) - days, seconds = divmod(seconds, 24*3600) - d += days - s += int(seconds) - microseconds = _round(microseconds + usdouble) - assert isinstance(s, int) - assert isinstance(microseconds, int) - assert abs(s) <= 3 * 24 * 3600 - assert abs(microseconds) < 3.1e6 - - # 
Just a little bit of carrying possible for microseconds and seconds. - seconds, us = divmod(microseconds, 1000000) - s += seconds - days, s = divmod(s, 24*3600) - d += days - - assert isinstance(d, (int, long)) - assert isinstance(s, int) and 0 <= s < 24*3600 - assert isinstance(us, int) and 0 <= us < 1000000 - - if abs(d) > 999999999: - raise OverflowError("timedelta # of days is too large: %d" % d) + if not -_MAX_DELTA_DAYS <= d <= _MAX_DELTA_DAYS: + raise OverflowError("days=%d; must have magnitude <= %d" % (d, _MAX_DELTA_DAYS)) self = object.__new__(cls) self._days = d @@ -535,6 +496,10 @@ self._hashcode = -1 return self + def _to_microseconds(self): + return ((self._days * _SECONDS_PER_DAY + self._seconds) * _US_PER_SECOND + + self._microseconds) + def __repr__(self): module = "datetime." if self.__class__ is timedelta else "" if self._microseconds: @@ -562,8 +527,7 @@ def total_seconds(self): """Total seconds in the duration.""" - return ((self.days * 86400 + self.seconds) * 10**6 + - self.microseconds) / 10**6 + return self._to_microseconds() / 10**6 # Read-only field accessors @property @@ -585,36 +549,37 @@ if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(self._days + other._days, - self._seconds + other._seconds, - self._microseconds + other._microseconds) + return timedelta._create(self._days + other._days, + self._seconds + other._seconds, + self._microseconds + other._microseconds, + True) return NotImplemented - __radd__ = __add__ - def __sub__(self, other): if isinstance(other, timedelta): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(self._days - other._days, - self._seconds - other._seconds, - self._microseconds - other._microseconds) - return NotImplemented - - def __rsub__(self, other): - if isinstance(other, timedelta): - return -self + other + return timedelta._create(self._days - other._days, + self._seconds - other._seconds, + self._microseconds - other._microseconds, + True) return NotImplemented def __neg__(self): # for CPython compatibility, we cannot use # our __class__ here, but need a real timedelta - return timedelta(-self._days, - -self._seconds, - -self._microseconds) + return timedelta._create(-self._days, + -self._seconds, + -self._microseconds, + True) def __pos__(self): - return self + # for CPython compatibility, we cannot use + # our __class__ here, but need a real timedelta + return timedelta._create(self._days, + self._seconds, + self._microseconds, + False) def __abs__(self): if self._days < 0: @@ -623,25 +588,18 @@ return self def __mul__(self, other): - if isinstance(other, (int, long)): - # for CPython compatibility, we cannot use - # our __class__ here, but need a real timedelta - return timedelta(self._days * other, - self._seconds * other, - self._microseconds * other) - return NotImplemented + if not isinstance(other, (int, long)): + return NotImplemented + usec = self._to_microseconds() + return timedelta._from_microseconds(usec * other) __rmul__ = __mul__ - def _to_microseconds(self): - return ((self._days * (24*3600) + self._seconds) * 1000000 + - self._microseconds) - def __div__(self, other): if not isinstance(other, (int, long)): return NotImplemented usec = self._to_microseconds() - return timedelta(0, 0, usec // other) + return timedelta._from_microseconds(usec // other) __floordiv__ = __div__ @@ -705,9 +663,8 @@ def __reduce__(self): return (self.__class__, self._getstate()) 
-timedelta.min = timedelta(-999999999) -timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59, - microseconds=999999) +timedelta.min = timedelta(-_MAX_DELTA_DAYS) +timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 1000000-1) timedelta.resolution = timedelta(microseconds=1) class date(object): @@ -948,32 +905,29 @@ # Computations - def _checkOverflow(self, year): - if not MINYEAR <= year <= MAXYEAR: - raise OverflowError("date +/-: result year %d not in %d..%d" % - (year, MINYEAR, MAXYEAR)) + def _add_timedelta(self, other, factor): + y, m, d = _normalize_date( + self._year, + self._month, + self._day + other.days * factor) + return date(y, m, d) def __add__(self, other): "Add a date to a timedelta." if isinstance(other, timedelta): - t = _tmxxx(self._year, - self._month, - self._day + other.days) - self._checkOverflow(t.year) - result = date(t.year, t.month, t.day) - return result + return self._add_timedelta(other, 1) return NotImplemented __radd__ = __add__ def __sub__(self, other): """Subtract two dates, or a date and a timedelta.""" - if isinstance(other, timedelta): - return self + timedelta(-other.days) if isinstance(other, date): days1 = self.toordinal() days2 = other.toordinal() - return timedelta(days1 - days2) + return timedelta._create(days1 - days2, 0, 0, False) + if isinstance(other, timedelta): + return self._add_timedelta(other, -1) return NotImplemented def weekday(self): @@ -1340,7 +1294,7 @@ offset = self._tzinfo.utcoffset(None) offset = _check_utc_offset("utcoffset", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1378,7 +1332,7 @@ offset = self._tzinfo.dst(None) offset = _check_utc_offset("dst", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1505,18 +1459,24 @@ A timezone info object may be passed in as well. """ + _check_tzinfo_arg(tz) + converter = _time.localtime if tz is None else _time.gmtime + self = cls._from_timestamp(converter, timestamp, tz) + if tz is not None: + self = tz.fromutc(self) + return self - _check_tzinfo_arg(tz) + @classmethod + def utcfromtimestamp(cls, t): + "Construct a UTC datetime from a POSIX timestamp (like time.time())." + return cls._from_timestamp(_time.gmtime, t, None) - converter = _time.localtime if tz is None else _time.gmtime - - if isinstance(timestamp, int): - us = 0 - else: - t_full = timestamp - timestamp = int(_math.floor(timestamp)) - frac = t_full - timestamp - us = _round(frac * 1e6) + @classmethod + def _from_timestamp(cls, converter, timestamp, tzinfo): + t_full = timestamp + timestamp = int(_math.floor(timestamp)) + frac = t_full - timestamp + us = _round(frac * 1e6) # If timestamp is less than one microsecond smaller than a # full second, us can be rounded up to 1000000. In this case, @@ -1527,32 +1487,7 @@ us = 0 y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp) ss = min(ss, 59) # clamp out leap seconds if the platform has them - result = cls(y, m, d, hh, mm, ss, us, tz) - if tz is not None: - result = tz.fromutc(result) - return result - - @classmethod - def utcfromtimestamp(cls, t): - "Construct a UTC datetime from a POSIX timestamp (like time.time())." 
- if isinstance(t, int): - us = 0 - else: - t_full = t - t = int(_math.floor(t)) - frac = t_full - t - us = _round(frac * 1e6) - - # If timestamp is less than one microsecond smaller than a - # full second, us can be rounded up to 1000000. In this case, - # roll over to seconds, otherwise, ValueError is raised - # by the constructor. - if us == 1000000: - t += 1 - us = 0 - y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t) - ss = min(ss, 59) # clamp out leap seconds if the platform has them - return cls(y, m, d, hh, mm, ss, us) + return cls(y, m, d, hh, mm, ss, us, tzinfo) @classmethod def now(cls, tz=None): @@ -1594,9 +1529,9 @@ hh, mm, ss = self.hour, self.minute, self.second offset = self._utcoffset() if offset: # neither None nor 0 - tm = _tmxxx(y, m, d, hh, mm - offset) - y, m, d = tm.year, tm.month, tm.day - hh, mm = tm.hour, tm.minute + mm -= offset + y, m, d, hh, mm, ss, _ = _normalize_datetime( + y, m, d, hh, mm, ss, 0, ignore_overflow=True) return _build_struct_time(y, m, d, hh, mm, ss, 0) def date(self): @@ -1730,7 +1665,7 @@ offset = self._tzinfo.utcoffset(self) offset = _check_utc_offset("utcoffset", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1768,7 +1703,7 @@ offset = self._tzinfo.dst(self) offset = _check_utc_offset("dst", offset) if offset is not None: - offset = timedelta(minutes=offset) + offset = timedelta._create(0, offset * 60, 0, True) return offset # Return an integer (or None) instead of a timedelta (or None). @@ -1859,22 +1794,22 @@ return -1 return diff and 1 or 0 + def _add_timedelta(self, other, factor): + y, m, d, hh, mm, ss, us = _normalize_datetime( + self._year, + self._month, + self._day + other.days * factor, + self._hour, + self._minute, + self._second + other.seconds * factor, + self._microsecond + other.microseconds * factor) + return datetime(y, m, d, hh, mm, ss, us, tzinfo=self._tzinfo) + def __add__(self, other): "Add a datetime and a timedelta." if not isinstance(other, timedelta): return NotImplemented - t = _tmxxx(self._year, - self._month, - self._day + other.days, - self._hour, - self._minute, - self._second + other.seconds, - self._microsecond + other.microseconds) - self._checkOverflow(t.year) - result = datetime(t.year, t.month, t.day, - t.hour, t.minute, t.second, - t.microsecond, tzinfo=self._tzinfo) - return result + return self._add_timedelta(other, 1) __radd__ = __add__ @@ -1882,16 +1817,15 @@ "Subtract two datetimes, or a datetime and a timedelta." 
if not isinstance(other, datetime): if isinstance(other, timedelta): - return self + -other + return self._add_timedelta(other, -1) return NotImplemented - days1 = self.toordinal() - days2 = other.toordinal() - secs1 = self._second + self._minute * 60 + self._hour * 3600 - secs2 = other._second + other._minute * 60 + other._hour * 3600 - base = timedelta(days1 - days2, - secs1 - secs2, - self._microsecond - other._microsecond) + delta_d = self.toordinal() - other.toordinal() + delta_s = (self._hour - other._hour) * 3600 + \ + (self._minute - other._minute) * 60 + \ + (self._second - other._second) + delta_us = self._microsecond - other._microsecond + base = timedelta._create(delta_d, delta_s, delta_us, True) if self._tzinfo is other._tzinfo: return base myoff = self._utcoffset() diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -73,28 +73,36 @@ lzma (PyPy3 only) liblzma -sqlite3 - libsqlite3 - -curses - libncurses + cffi dependencies from above - pyexpat libexpat1 _ssl libssl +Make sure to have these libraries (with development headers) installed +before building PyPy, otherwise the resulting binary will not contain +these modules. Furthermore, the following libraries should be present +after building PyPy, otherwise the corresponding CFFI modules are not +built (you can run or re-run `pypy/tool/release/package.py` to retry +to build them; you don't need to re-translate the whole PyPy): + +sqlite3 + libsqlite3 + +curses + libncurses + gdbm libgdbm-dev -Make sure to have these libraries (with development headers) installed before -building PyPy, otherwise the resulting binary will not contain these modules. +tk + tk-dev On Debian, this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev + libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ + tk-dev For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. @@ -102,6 +110,7 @@ yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel + (XXX plus the Febora version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -110,6 +119,7 @@ zypper install gcc make python-devel pkg-config \ zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ libexpat-devel libffi-devel python-curses + (XXX plus the SLES11 version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -125,11 +135,13 @@ Translate with JIT:: - pypy rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=jit Translate without JIT:: - pypy rpython/bin/rpython --opt=2 pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=2 (You can use ``python`` instead of ``pypy`` here, which will take longer but works too.) @@ -138,8 +150,16 @@ current directory. The executable behaves mostly like a normal Python interpreter (see :doc:`cpython_differences`). +Build cffi import libraries for the stdlib +------------------------------------------ -.. _translate-pypy: +Various stdlib modules require a separate build step to create the cffi +import libraries in the `out-of-line API mode`_. This is done by the following +command:: + + PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py + +.. 
_`out-of-line API mode`: http://cffi.readthedocs.org/en/latest/overview.html#real-example-api-level-out-of-line Translating with non-standard options ------------------------------------- @@ -199,4 +219,3 @@ that this is never the case. -.. TODO windows diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -130,8 +130,13 @@ More complete example --------------------- -.. note:: This example depends on pypy_execute_source_ptr which is not available - in PyPy <= 2.2.1. +.. note:: Note that we do not make use of ``extern "Python"``, the new + way to do callbacks in CFFI 1.4: this is because these examples use + the ABI mode, not the API mode, and with the ABI mode you still have + to use ``ffi.callback()``. It is work in progress to integrate + ``extern "Python"`` with the idea of embedding (and it is expected + to ultimately lead to a better way to do embedding than the one + described here, and that would work equally well on CPython and PyPy). Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -83,28 +83,27 @@ **pypy-stm requires 64-bit Linux for now.** -Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu binary here__ -(``pypy-stm-2.*.tar.bz2``, for Ubuntu 12.04-14.04). The current version -supports four "segments", which means that it will run up to four -threads in parallel. (Development recently switched to `stmgc-c8`_, -but that is not ready for trying out yet.) +Development is done in the branch `stmgc-c8`_. If you are only +interested in trying it out, please pester us until we upload a recent +prebuilt binary. The current version supports four "segments", which +means that it will run up to four threads in parallel. To build a version from sources, you first need to compile a custom -version of clang(!); we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 `` -for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for a clang-only feature that hasn't been used so heavily -in the past (without the patches, you get crashes of clang). Then get -the branch `stmgc-c7`_ of PyPy and run:: +version of gcc(!). See the instructions here: +https://bitbucket.org/pypy/stmgc/src/default/gcc-seg-gs/ +(Note that these patches are being incorporated into gcc. It is likely +that future versions of gcc will not need to be patched any more.) - rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py +Then get the branch `stmgc-c8`_ of PyPy and run:: -.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ + cd pypy/goal + ../../rpython/bin/rpython -Ojit --stm + +At the end, this will try to compile the generated C code by calling +``gcc-seg-gs``, which must be the script you installed in the +instructions above. + .. _`stmgc-c8`: https://bitbucket.org/pypy/pypy/src/stmgc-c8/ -.. __: https://bitbucket.org/pypy/pypy/downloads/ -.. __: http://clang.llvm.org/get_started.html -.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ .. _caveats: @@ -112,6 +111,12 @@ Current status (stmgc-c7) ------------------------- +.. 
warning:: + + THIS PAGE IS OLD, THE REST IS ABOUT STMGC-C7 WHEREAS THE CURRENT + DEVELOPMENT WORK IS DONE ON STMGC-C8 + + * **NEW:** It seems to work fine, without crashing any more. Please `report any crash`_ you find (or other bugs). diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,4 +15,78 @@ Fix the cpyext tests on OSX by linking with -flat_namespace .. branch: anntype + Refactor and improve exception analysis in the annotator. + +.. branch: posita/2193-datetime-timedelta-integrals + +Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` +to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) + +.. branch: faster-rstruct + +Improve the performace of struct.unpack, which now directly reads inside the +string buffer and directly casts the bytes to the appropriate type, when +allowed. Unpacking of floats and doubles is about 15 times faster now, while +for integer types it's up to ~50% faster for 64bit integers. + +.. branch: wrap-specialisation + +Remove unnecessary special handling of space.wrap(). + +.. branch: compress-numbering + +Improve the memory signature of numbering instances in the JIT. + +.. branch: fix-trace-too-long-heuristic + +Improve the heuristic when disable trace-too-long + +.. branch: fix-setslice-can-resize + +.. branch: anntype2 + +A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: + +- Implement @doubledispatch decorator and use it for intersection() and difference(). + +- Turn isinstance into a SpaceOperation + +- Create a few direct tests of the fundamental annotation invariant in test_model.py + +- Remove bookkeeper attribute from DictDef and ListDef. + +.. branch: cffi-static-callback + +.. branch: vecopt-absvalue + +- Enhancement. Removed vector fields from AbstractValue. + +.. branch: memop-simplify2 + +Simplification. Backends implement too many loading instructions, only having a slightly different interface. +Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the +commonly known loading operations + +.. branch: more-rposix + +Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and +turn them into regular RPython functions. Most RPython-compatible `os.*` +functions are now directly accessible as `rpython.rposix.*`. + +.. branch: always-enable-gil + +Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. + +.. branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + +.. branch: test-AF_NETLINK +.. branch: small-cleanups-misc +.. branch: cpyext-slotdefs +.. branch: fix-missing-canraise + +.. branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing in rpython diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -60,6 +60,7 @@ set PYPY_GC_MAX_DELTA=200MB pypy --jit loop_longevity=300 ../../rpython/bin/rpython -Ojit targetpypystandalone set PYPY_GC_MAX_DELTA= + PYTHONPATH=../.. ./pypy-c ../tool/build_cffi_imports.py .. _build instructions: http://pypy.org/download.html#building-from-source diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -81,9 +81,8 @@ # register the minimal equivalent of running a small piece of code. 
This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint, RPython_StartupCode + from rpython.rlib.entrypoint import entrypoint_highlevel from rpython.rtyper.lltypesystem import rffi, lltype - from rpython.rtyper.lltypesystem.lloperation import llop w_pathsetter = space.appexec([], """(): def f(path): @@ -92,7 +91,8 @@ return f """) - @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') + @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], + c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib verbose = rffi.cast(lltype.Signed, verbose) @@ -126,30 +126,24 @@ debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) return rffi.cast(rffi.INT, -1) - @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') + @entrypoint_highlevel('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): return pypy_execute_source_ptr(ll_source, 0) - @entrypoint('main', [rffi.CCHARP, lltype.Signed], - c_name='pypy_execute_source_ptr') + @entrypoint_highlevel('main', [rffi.CCHARP, lltype.Signed], + c_name='pypy_execute_source_ptr') def pypy_execute_source_ptr(ll_source, ll_ptr): - after = rffi.aroundstate.after - if after: after() source = rffi.charp2str(ll_source) res = _pypy_execute_source(source, ll_ptr) - before = rffi.aroundstate.before - if before: before() return rffi.cast(rffi.INT, res) - @entrypoint('main', [], c_name='pypy_init_threads') + @entrypoint_highlevel('main', [], c_name='pypy_init_threads') def pypy_init_threads(): if not space.config.objspace.usemodules.thread: return os_thread.setup_threads(space) - before = rffi.aroundstate.before - if before: before() - @entrypoint('main', [], c_name='pypy_thread_attach') + @entrypoint_highlevel('main', [], c_name='pypy_thread_attach') def pypy_thread_attach(): if not space.config.objspace.usemodules.thread: return @@ -158,8 +152,6 @@ rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() - before = rffi.aroundstate.before - if before: before() def _pypy_execute_source(source, c_argument): try: diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -289,6 +289,8 @@ for w_item in space.fixedview(obj): result_w.append(self._make_key(w_item)) w_key = space.newtuple(result_w[:]) + elif isinstance(obj, PyCode): + w_key = space.newtuple([obj, w_type, space.id(obj)]) else: w_key = space.newtuple([obj, w_type]) return w_key diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -931,6 +931,11 @@ finally: space.call_function(w_set_debug, space.w_True) + def test_dont_fold_equal_code_objects(self): + yield self.st, "f=lambda:1;g=lambda:1.0;x=g()", 'type(x)', float + yield (self.st, "x=(lambda: (-0.0, 0.0), lambda: (0.0, -0.0))[1]()", + 'repr(x)', '(0.0, -0.0)') + class AppTestCompiler: diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload, clibffi -VERSION = "1.3.1" +VERSION = "1.4.2" 
FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py new file mode 100644 --- /dev/null +++ b/pypy/module/_cffi_backend/call_python.py @@ -0,0 +1,133 @@ +import os +from rpython.rlib.objectmodel import specialize, instantiate +from rpython.rlib.rarithmetic import intmask +from rpython.rlib import jit +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.annlowlevel import llhelper + +from pypy.interpreter.error import oefmt +from pypy.interpreter.gateway import interp2app +from pypy.module._cffi_backend import parse_c_type +from pypy.module._cffi_backend import cerrno +from pypy.module._cffi_backend import cffi_opcode +from pypy.module._cffi_backend import realize_c_type +from pypy.module._cffi_backend.realize_c_type import getop, getarg + + +STDERR = 2 +EXTERNPY_FN = lltype.FuncType([parse_c_type.PEXTERNPY, rffi.CCHARP], + lltype.Void) + + +def _cffi_call_python(ll_externpy, ll_args): + """Invoked by the helpers generated from extern "Python" in the cdef. + + 'externpy' is a static structure that describes which of the + extern "Python" functions is called. It has got fields 'name' and + 'type_index' describing the function, and more reserved fields + that are initially zero. These reserved fields are set up by + ffi.def_extern(), which invokes externpy_deco() below. + + 'args' is a pointer to an array of 8-byte entries. Each entry + contains an argument. If an argument is less than 8 bytes, only + the part at the beginning of the entry is initialized. If an + argument is 'long double' or a struct/union, then it is passed + by reference. + + 'args' is also used as the place to write the result to + (directly, even if more than 8 bytes). In all cases, 'args' is + at least 8 bytes in size. 
+ """ + from pypy.module._cffi_backend.ccallback import reveal_callback + from rpython.rlib import rgil + From pypy.commits at gmail.com Tue Dec 22 14:39:12 2015 From: pypy.commits at gmail.com (sbauman) Date: Tue, 22 Dec 2015 11:39:12 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Fix broken test and screwups from merge Message-ID: <5679a6e0.508e1c0a.d3236.3aee@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81425:1c52e941a48d Date: 2015-12-22 14:28 -0500 http://bitbucket.org/pypy/pypy/changeset/1c52e941a48d/ Log: Fix broken test and screwups from merge diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -9266,7 +9266,7 @@ guard_value(i1, 5) [] jump() """ - a = lltype.malloc(lltype.GcArray(lltype.Ptr(self.NODE)), 5, zero=True) + a = lltype.malloc(lltype.GcArray(lltype.Ptr(self.NODE3)), 5, zero=True) self.optimize_loop(ops, expected, jump_values=[a]) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -246,6 +246,11 @@ def forget_value(self): pass +def is_pure_getfield(opnum, descr): + if opnum not in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R): + return False + return descr is not None and descr.is_always_pure() != False + class AbstractResOp(AbstractResOpOrInputArg): """The central ResOperation class, representing one operation.""" @@ -1166,6 +1171,20 @@ # '_ALWAYS_PURE_LAST', # ----- end of always_pure operations ----- + # parameters GC_LOAD + # 1: pointer to complex object + # 2: integer describing the offset + # 3: constant integer. byte size of datatype to load (negative if it is signed) + 'GC_LOAD/3/rfi', + # parameters GC_LOAD_INDEXED + # 1: pointer to complex object + # 2: integer describing the index + # 3: constant integer scale factor + # 4: constant integer base offset (final offset is 'base + scale * index') + # 5: constant integer. byte size of datatype to load (negative if it is signed) + # (GC_LOAD is equivalent to GC_LOAD_INDEXED with arg3==1, arg4==0) + 'GC_LOAD_INDEXED/5/rfi', + '_RAW_LOAD_FIRST', 'GETARRAYITEM_GC/2d/rfi', 'VEC_GETARRAYITEM_GC/2d/fi', From pypy.commits at gmail.com Tue Dec 22 17:12:13 2015 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 22 Dec 2015 14:12:13 -0800 (PST) Subject: [pypy-commit] pypy default: Merged in vincentlegoll/pypy/whatsnew (pull request #383) Message-ID: <5679cabd.a85fc20a.99e77.ffff8052@mx.google.com> Author: Philip Jenvey Branch: Changeset: r81428:8e1560f634c9 Date: 2015-12-22 14:11 -0800 http://bitbucket.org/pypy/pypy/changeset/8e1560f634c9/ Log: Merged in vincentlegoll/pypy/whatsnew (pull request #383) Whatsnew update diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,6 +44,9 @@ .. branch: fix-setslice-can-resize +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + .. branch: anntype2 A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: @@ -83,10 +86,17 @@ Trivial cleanups in flowspace.operation : fix comment & duplicated method .. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. 
Was part of issue #1942. + .. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + .. branch: cpyext-slotdefs .. branch: fix-missing-canraise .. branch: fix-2211 -Fix the cryptic exception message when attempting to use extended slicing in rpython +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. From pypy.commits at gmail.com Tue Dec 22 17:12:21 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 22 Dec 2015 14:12:21 -0800 (PST) Subject: [pypy-commit] pypy whatsnew: new branch Message-ID: <5679cac5.0357c20a.f7afb.78dd@mx.google.com> Author: Vincent Legoll Branch: whatsnew Changeset: r81426:411af75f8a8c Date: 2015-12-20 10:47 +0100 http://bitbucket.org/pypy/pypy/changeset/411af75f8a8c/ Log: new branch From pypy.commits at gmail.com Tue Dec 22 17:12:23 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 22 Dec 2015 14:12:23 -0800 (PST) Subject: [pypy-commit] pypy whatsnew: Update what's new, documenting my branches. Message-ID: <5679cac7.ce131c0a.af61c.6e74@mx.google.com> Author: Vincent Legoll Branch: whatsnew Changeset: r81427:c0099c341793 Date: 2015-12-20 11:01 +0100 http://bitbucket.org/pypy/pypy/changeset/c0099c341793/ Log: Update what's new, documenting my branches. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,6 +44,9 @@ .. branch: fix-setslice-can-resize +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + .. branch: anntype2 A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: @@ -83,9 +86,16 @@ Trivial cleanups in flowspace.operation : fix comment & duplicated method .. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. + .. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + .. branch: cpyext-slotdefs .. branch: fix-2211 -Fix the cryptic exception message when attempting to use extended slicing in rpython +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. 
From pypy.commits at gmail.com Wed Dec 23 02:25:42 2015 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Dec 2015 23:25:42 -0800 (PST) Subject: [pypy-commit] pypy default: Merged in yufeiz/pypy-tp_getattro (pull request #384) Message-ID: <567a4c76.a658c20a.49ee6.ffffe936@mx.google.com> Author: Maciej Fijalkowski Branch: Changeset: r81432:d5545deb980c Date: 2015-12-23 09:25 +0200 http://bitbucket.org/pypy/pypy/changeset/d5545deb980c/ Log: Merged in yufeiz/pypy-tp_getattro (pull request #384) Expose different tp_getattro slots for different builtin types diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -380,6 +380,17 @@ space.call_function(delattr_fn, w_self, w_name) return 0 api_func = slot_tp_setattro.api_func + elif name == 'tp_getattro': + getattr_fn = w_type.getdictvalue(space, '__getattribute__') + if getattr_fn is None: + return + + @cpython_api([PyObject, PyObject], PyObject, + error=lltype.nullptr(rffi.VOIDP.TO), external=True) + @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) + def slot_tp_getattro(space, w_self, w_name): + return space.call_function(getattr_fn, w_self, w_name) + api_func = slot_tp_getattro.api_func else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -385,12 +385,53 @@ PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); return NULL; } + if (!args->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + if (args->ob_type->tp_getattro == + args->ob_type->tp_base->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "recursive tp_getattro"); + return NULL; + } Py_RETURN_TRUE; ''' ) ]) assert module.test_type(type(None)) + def test_tp_getattro(self): + module = self.import_extension('foo', [ + ("test_tp_getattro", "METH_VARARGS", + ''' + PyObject *obj = PyTuple_GET_ITEM(args, 0); + PyIntObject *value = PyTuple_GET_ITEM(args, 1); + if (!obj->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + PyObject *name = PyString_FromString("attr1"); + PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); + if (attr1->ob_ival != value->ob_ival) + { + PyErr_SetString(PyExc_ValueError, + "tp_getattro returned wrong value"); + return NULL; + } + Py_DECREF(name); + Py_DECREF(attr1); + Py_RETURN_TRUE; + ''' + ) + ]) + class C: + def __init__(self): + self.attr1 = 123 + assert module.test_tp_getattro(C(), 123) + def test_nb_int(self): module = self.import_extension('foo', [ ("nb_int", "METH_O", diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -582,6 +582,8 @@ pto.c_tp_free = base.c_tp_free if not pto.c_tp_setattro: pto.c_tp_setattro = base.c_tp_setattro + if not pto.c_tp_getattro: + pto.c_tp_getattro = base.c_tp_getattro finally: Py_DecRef(space, base_pyo) @@ -651,6 +653,12 @@ PyObject_GenericSetAttr.api_func.functype, PyObject_GenericSetAttr.api_func.get_wrapper(space)) + if not pto.c_tp_getattro: + from pypy.module.cpyext.object import PyObject_GenericGetAttr + pto.c_tp_getattro = llhelper( + PyObject_GenericGetAttr.api_func.functype, + PyObject_GenericGetAttr.api_func.get_wrapper(space)) + if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) w_dict = w_obj.getdict(space) From 
pypy.commits at gmail.com Wed Dec 23 02:25:57 2015 From: pypy.commits at gmail.com (yufeiz) Date: Tue, 22 Dec 2015 23:25:57 -0800 (PST) Subject: [pypy-commit] pypy default: Remove debug prints Message-ID: <567a4c85.e686c20a.322f2.ffffded6@mx.google.com> Author: Faye Zhao Branch: Changeset: r81431:ac941720d6fb Date: 2015-12-23 07:09 +0000 http://bitbucket.org/pypy/pypy/changeset/ac941720d6fb/ Log: Remove debug prints diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -417,8 +417,6 @@ PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); if (attr1->ob_ival != value->ob_ival) { - printf("attr1->ob_ival = %ld\\n", attr1->ob_ival); - printf("value->ob_ival = %ld\\n", value->ob_ival); PyErr_SetString(PyExc_ValueError, "tp_getattro returned wrong value"); return NULL; From pypy.commits at gmail.com Wed Dec 23 02:25:54 2015 From: pypy.commits at gmail.com (yufeiz) Date: Tue, 22 Dec 2015 23:25:54 -0800 (PST) Subject: [pypy-commit] pypy default: Expose tp_getattro for different builtin types. Message-ID: <567a4c82.c74fc20a.c7529.ffffea0e@mx.google.com> Author: Faye Zhao Branch: Changeset: r81429:0c6b1683826d Date: 2015-12-22 13:54 -0800 http://bitbucket.org/pypy/pypy/changeset/0c6b1683826d/ Log: Expose tp_getattro for different builtin types. diff --git a/.cache/v/cache/lastfailed b/.cache/v/cache/lastfailed new file mode 100644 --- /dev/null +++ b/.cache/v/cache/lastfailed @@ -0,0 +1,3 @@ +{ + "pypy/module/cpyext/test/test_typeobject.py::AppTestSlots::()::test_binaryfunc": true +} \ No newline at end of file diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -380,6 +380,17 @@ space.call_function(delattr_fn, w_self, w_name) return 0 api_func = slot_tp_setattro.api_func + elif name == 'tp_getattro': + getattr_fn = w_type.getdictvalue(space, '__getattribute__') + if getattr_fn is None: + return + + @cpython_api([PyObject, PyObject], PyObject, + error=lltype.nullptr(rffi.VOIDP.TO), external=True) + @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) + def slot_tp_getattro(space, w_self, w_name): + return space.call_function(getattr_fn, w_self, w_name) + api_func = slot_tp_getattro.api_func else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -385,12 +385,55 @@ PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); return NULL; } + if (!args->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + if (args->ob_type->tp_getattro == + args->ob_type->tp_base->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "recursive tp_getattro"); + return NULL; + } Py_RETURN_TRUE; ''' ) ]) assert module.test_type(type(None)) + def test_tp_getattro(self): + module = self.import_extension('foo', [ + ("test_tp_getattro", "METH_VARARGS", + ''' + PyObject *obj = PyTuple_GET_ITEM(args, 0); + PyIntObject *value = PyTuple_GET_ITEM(args, 1); + if (!obj->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + PyObject *name = PyString_FromString("attr1"); + PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); + if (attr1->ob_ival != value->ob_ival) + { + printf("attr1->ob_ival = %ld\\n", 
attr1->ob_ival); + printf("value->ob_ival = %ld\\n", value->ob_ival); + PyErr_SetString(PyExc_ValueError, + "tp_getattro returned wrong value"); + return NULL; + } + Py_DECREF(name); + Py_DECREF(attr1); + Py_RETURN_TRUE; + ''' + ) + ]) + class C: + def __init__(self): + self.attr1 = 123 + assert module.test_tp_getattro(C(), 123) + def test_nb_int(self): module = self.import_extension('foo', [ ("nb_int", "METH_O", diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -582,6 +582,8 @@ pto.c_tp_free = base.c_tp_free if not pto.c_tp_setattro: pto.c_tp_setattro = base.c_tp_setattro + if not pto.c_tp_getattro: + pto.c_tp_getattro = base.c_tp_getattro finally: Py_DecRef(space, base_pyo) @@ -651,6 +653,12 @@ PyObject_GenericSetAttr.api_func.functype, PyObject_GenericSetAttr.api_func.get_wrapper(space)) + if not pto.c_tp_getattro: + from pypy.module.cpyext.object import PyObject_GenericGetAttr + pto.c_tp_getattro = llhelper( + PyObject_GenericGetAttr.api_func.functype, + PyObject_GenericGetAttr.api_func.get_wrapper(space)) + if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) w_dict = w_obj.getdict(space) From pypy.commits at gmail.com Wed Dec 23 02:25:55 2015 From: pypy.commits at gmail.com (yufeiz) Date: Tue, 22 Dec 2015 23:25:55 -0800 (PST) Subject: [pypy-commit] pypy default: lastfailed deleted online with Bitbucket Message-ID: <567a4c83.6351c20a.2321.ffffdc9c@mx.google.com> Author: Faye Zhao Branch: Changeset: r81430:e47d3a6dc613 Date: 2015-12-22 21:55 +0000 http://bitbucket.org/pypy/pypy/changeset/e47d3a6dc613/ Log: lastfailed deleted online with Bitbucket diff --git a/.cache/v/cache/lastfailed b/.cache/v/cache/lastfailed deleted file mode 100644 --- a/.cache/v/cache/lastfailed +++ /dev/null @@ -1,3 +0,0 @@ -{ - "pypy/module/cpyext/test/test_typeobject.py::AppTestSlots::()::test_binaryfunc": true -} \ No newline at end of file From pypy.commits at gmail.com Wed Dec 23 04:23:03 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 23 Dec 2015 01:23:03 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added code to save/restore/propagate exception information Message-ID: <567a67f7.82bfc20a.d32f8.0abd@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81433:f8b22a235798 Date: 2015-12-23 10:22 +0100 http://bitbucket.org/pypy/pypy/changeset/f8b22a235798/ Log: added code to save/restore/propagate exception information diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -8,6 +8,7 @@ from rpython.jit.backend.zarch import locations as l from rpython.jit.backend.zarch.pool import LiteralPool from rpython.jit.backend.zarch.codebuilder import InstrBuilder +from rpython.jit.backend.zarch.helper.regalloc import check_imm_value from rpython.jit.backend.zarch.registers import JITFRAME_FIXED_SIZE from rpython.jit.backend.zarch.regalloc import ZARCHRegisterManager from rpython.jit.backend.zarch.arch import (WORD, @@ -27,6 +28,7 @@ from rpython.rlib import rgc from rpython.rlib.longlong2float import float2longlong from rpython.rtyper.lltypesystem import lltype, rffi, llmemory +from rpython.rtyper.annlowlevel import llhelper, cast_instance_to_gcref from rpython.rlib.jit import AsmInfo class AssemblerZARCH(BaseAssembler, OpAssembler): @@ -94,20 +96,18 @@ self._push_fp_regs_to_jitframe(mc) if exc: - pass # TODO - #xxx - ## We might have an 
exception pending. - #mc.load_imm(r.r2, self.cpu.pos_exc_value()) - ## Copy it into 'jf_guard_exc' - #offset = self.cpu.get_ofs_of_frame_field('jf_guard_exc') - #mc.load(r.r0.value, r.r2.value, 0) - #mc.store(r.r0.value, r.SPP.value, offset) - ## Zero out the exception fields - #diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() - #assert _check_imm_arg(diff) - #mc.li(r.r0.value, 0) - #mc.store(r.r0.value, r.r2.value, 0) - #mc.store(r.r0.value, r.r2.value, diff) + # We might have an exception pending. + mc.load_imm(r.SCRATCH, self.cpu.pos_exc_value()) + # Copy it into 'jf_guard_exc' + offset = self.cpu.get_ofs_of_frame_field('jf_guard_exc') + mc.LG(r.SCRATCH2, l.addr(0, r.SCRATCH)) + mc.STG(r.SCRATCH2, l.addr(offset, r.SPP)) + # Zero out the exception fields + diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() + assert check_imm_value(diff) + mc.LGHI(r.SCRATCH2, l.imm(0)) + mc.STG(r.SCRATCH2, l.addr(0, r.SCRATCH)) + mc.STG(r.SCRATCH2, l.addr(diff, r.SCRATCH)) # now we return from the complete frame, which starts from # _call_header_with_stack_check(). The _call_footer below does it. @@ -262,6 +262,28 @@ else: self.wb_slowpath[withcards + 2 * withfloats] = rawstart + def _store_and_reset_exception(self, mc, excvalloc, exctploc=None): + """Reset the exception, after fetching it inside the two regs. + """ + mc.load_imm(r.SCRATCH, self.cpu.pos_exc_value()) + diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() + assert check_imm_value(diff) + # Load the exception fields into the two registers + mc.LG(excvalloc, l.addr(0,r.SCRATCH)) + if exctploc is not None: + mc.LG(exctploc, l.addr(diff, r.SCRATCH)) + # Zero out the exception fields + mc.LGHI(r.SCRATCH2, l.imm(0)) + mc.STG(r.SCRATCH2, l.addr(0, r.SCRATCH)) + mc.STG(r.SCRATCH2, l.addr(diff, r.SCRATCH)) + + def _restore_exception(self, mc, excvalloc, exctploc): + mc.load_imm(r.SCRATCH, self.cpu.pos_exc_value()) + diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() + assert check_imm_value(diff) + # Store the exception fields from the two registers + mc.STG(excvalloc, l.addr(0, r.SCRATCH)) + mc.STG(exctploc, l.addr(diff, r.SCRATCH)) def build_frame_realloc_slowpath(self): # this code should do the following steps @@ -328,7 +350,7 @@ if not self.cpu.propagate_exception_descr: return - self.mc = PPCBuilder() + self.mc = InstrBuilder() # # read and reset the current exception @@ -338,9 +360,9 @@ ofs4 = self.cpu.get_ofs_of_frame_field('jf_descr') self._store_and_reset_exception(self.mc, r.r3) - self.mc.load_imm(r.r4, propagate_exception_descr) - self.mc.std(r.r3.value, r.SPP.value, ofs3) - self.mc.std(r.r4.value, r.SPP.value, ofs4) + self.mc.load_imm(r.r3, propagate_exception_descr) + self.mc.STG(r.r2, l.addr(ofs3, r.SPP)) + self.mc.STG(r.r3, l.addr(ofs4, r.SPP)) # self._call_footer() rawstart = self.mc.materialize(self.cpu, []) @@ -381,9 +403,6 @@ if supports_floats: self._push_fp_regs_to_jitframe(mc) - # Save away the LR inside r30 - # TODO ? mc.mflr(r.RCS1.value) - # allocate a stack frame! mc.STG(r.SP, l.addr(-STD_FRAME_SIZE_IN_BYTES, r.SP)) # store the backchain mc.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) @@ -396,7 +415,6 @@ # Finish self._reload_frame_if_necessary(mc) - # TODO ? 
mc.mtlr(r.RCS1.value) # restore LR self._pop_core_regs_from_jitframe(mc, saved_regs + [r.r14]) if supports_floats: self._pop_fp_regs_from_jitframe(mc) @@ -420,7 +438,7 @@ else: endaddr, lengthaddr, _ = self.cpu.insert_stack_check() diff = lengthaddr - endaddr - assert _check_imm_arg(diff) + assert check_imm_value(diff) mc = self.mc mc.load_imm(r.SCRATCH, self.stack_check_slowpath) @@ -611,11 +629,9 @@ # if self.propagate_exception_path == 0 (tests), this may jump to 0 # and segfaults. too bad. the alternative is to continue anyway # with r3==0, but that will segfault too. - if False: - # TODO !! - xxx - self.mc.cmp_op(0, r.r3.value, 0, imm=True) - self.mc.b_cond_abs(self.propagate_exception_path, c.EQ) + self.mc.cmp_op(r.r2, l.imm(0), imm=True) + self.mc.load_imm(r.RETURN, self.propagate_exception_path) + self.mc.BCR(c.EQ, r.RETURN) def regalloc_push(self, loc, already_pushed): """Pushes the value stored in loc to the stack diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -266,7 +266,7 @@ p_errno = llerrno.get_p_errno_offset(self.asm.cpu) self.mc.LG(r.r11, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) self.mc.LG(r.r11, l.addr(p_errno, r.r11)) - self.mc.LGHI(r.SCRATCH, 0) + self.mc.LGHI(r.SCRATCH, l.imm(0)) self.mc.STY(r.SCRATCH, l.addr(0,r.r11)) def read_real_errno(self, save_err): From pypy.commits at gmail.com Wed Dec 23 04:33:09 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 23 Dec 2015 01:33:09 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: pushing constant base ptr of gc_store/load into literal pool Message-ID: <567a6a55.aa0bc30a.7f46c.1ce9@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81434:24cc91a9576d Date: 2015-12-23 10:32 +0100 http://bitbucket.org/pypy/pypy/changeset/24cc91a9576d/ Log: pushing constant base ptr of gc_store/load into literal pool diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -53,6 +53,10 @@ self.reserve_literal(8) return elif opnum == rop.GC_STORE or opnum == rop.GC_STORE_INDEXED: + arg = op.getarg(0) + if arg.is_constant(): + self.offset_map[arg] = self.size + self.reserve_literal(8) arg = op.getarg(2) if arg.is_constant(): self.offset_map[arg] = self.size @@ -64,6 +68,10 @@ or opnum in (rop.GC_LOAD_INDEXED_F, rop.GC_LOAD_INDEXED_R, rop.GC_LOAD_INDEXED_I,): + arg = op.getarg(0) + if arg.is_constant(): + self.offset_map[arg] = self.size + self.reserve_literal(8) return elif op.is_call_release_gil(): for arg in op.getarglist()[1:]: From pypy.commits at gmail.com Wed Dec 23 06:55:30 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 23 Dec 2015 03:55:30 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added impl to call memcpy Message-ID: <567a8bb2.034cc20a.16a73.4a83@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81435:f8b4c491d659 Date: 2015-12-23 11:50 +0100 http://bitbucket.org/pypy/pypy/changeset/f8b4c491d659/ Log: added impl to call memcpy diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -8,6 +8,7 @@ from rpython.jit.backend.zarch.helper.regalloc import (check_imm, check_imm_value) from rpython.jit.backend.zarch.codebuilder import ZARCHGuardToken, InstrBuilder +from 
rpython.jit.backend.llsupport import symbolic, jitframe import rpython.jit.backend.zarch.conditions as c import rpython.jit.backend.zarch.registers as r import rpython.jit.backend.zarch.locations as l @@ -832,6 +833,64 @@ def _mem_offset_supported(self, value): return -2**19 <= value < 2**19 + def emit_copystrcontent(self, op, arglocs, regalloc): + self._emit_copycontent(arglocs, is_unicode=False) + + def emit_copyunicodecontent(self, op, arglocs, regalloc): + self._emit_copycontent(arglocs, is_unicode=True) + + def _emit_load_for_copycontent(self, dst, src_ptr, src_ofs, scale): + if src_ofs.is_imm(): + value = src_ofs.value << scale + if check_imm_value(value): + self.mc.LGR(dst, src_ptr) + self.mc.AGHI(dst, l.imm(value)) + else: + self.mc.load_imm(dst, value) + self.mc.AGR(dst, src_ptr) + elif scale == 0: + self.mc.AGR(dst, src_ptr) + self.mc.AGR(dst, src_ofs) + else: + self.mc.SLAG(dst, src_ofs, l.add(scale)) + self.mc.AGR(dst, src_ptr) + + def _emit_copycontent(self, arglocs, is_unicode): + [src_ptr_loc, dst_ptr_loc, + src_ofs_loc, dst_ofs_loc, length_loc] = arglocs + + if is_unicode: + basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, + self.cpu.translate_support_code) + if itemsize == 2: scale = 1 + elif itemsize == 4: scale = 2 + else: raise AssertionError + else: + basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, + self.cpu.translate_support_code) + assert itemsize == 1 + scale = 0 + + self._emit_load_for_copycontent(r.r0, src_ptr_loc, src_ofs_loc, scale) + self._emit_load_for_copycontent(r.r2, dst_ptr_loc, dst_ofs_loc, scale) + + if length_loc.is_imm(): + length = length_loc.getint() + self.mc.load_imm(r.r4, length << scale) + else: + if scale > 0: + self.mc.sldi(r.r4.value, length_loc.value, scale) + elif length_loc is not r.r5: + self.mc.LGR(r.r4, length_loc) + + self.mc.LGR(r.r3, r.r0) + self.mc.AGHI(r.r3, l.imm(basesize)) + self.mc.AGHI(r.r2, l.imm(basesize)) + + self.mc.load_imm(self.mc.RAW_CALL_REG, self.memcpy_addr) + self.mc.raw_call() + + class ForceOpAssembler(object): _mixin_ = True @@ -930,7 +989,6 @@ mc.copy_to_raw_memory(oldadr) - class MiscOpAssembler(object): _mixin_ = True diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1009,6 +1009,18 @@ locs = self._prepare_guard(op) return locs + def prepare_copystrcontent(self, op): + src_ptr_loc = self.ensure_reg(op.getarg(0)) + dst_ptr_loc = self.ensure_reg(op.getarg(1)) + src_ofs_loc = self.ensure_reg_or_any_imm(op.getarg(2)) + dst_ofs_loc = self.ensure_reg_or_any_imm(op.getarg(3)) + length_loc = self.ensure_reg_or_any_imm(op.getarg(4)) + self._spill_before_call(save_all_regs=False) + return [src_ptr_loc, dst_ptr_loc, + src_ofs_loc, dst_ofs_loc, length_loc] + + prepare_copyunicodecontent = prepare_copystrcontent + def prepare_label(self, op): descr = op.getdescr() assert isinstance(descr, TargetToken) From pypy.commits at gmail.com Wed Dec 23 06:55:32 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 23 Dec 2015 03:55:32 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: finished memcpy call, adding stack frame to the routine correctly Message-ID: <567a8bb4.4a5ec20a.af75e.4214@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81436:024661e7ca0b Date: 2015-12-23 11:50 +0100 http://bitbucket.org/pypy/pypy/changeset/024661e7ca0b/ Log: finished memcpy call, adding stack frame to the routine correctly diff --git 
a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -1,6 +1,7 @@ from rpython.jit.backend.zarch import conditions as c from rpython.jit.backend.zarch import registers as r from rpython.jit.backend.zarch import locations as l +from rpython.jit.backend.zarch.arch import STD_FRAME_SIZE_IN_BYTES from rpython.jit.backend.zarch.instruction_builder import build_instr_codes from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin from rpython.jit.backend.llsupport.assembler import GuardToken @@ -186,6 +187,13 @@ """ self.BASR(r.RETURN, call_reg) + def alloc_std_frame(self): + self.STG(r.SP, l.addr(-STD_FRAME_SIZE_IN_BYTES, r.SP)) + self.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) + + def restore_std_frame(self): + self.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) + class OverwritingBuilder(BlockBuilderMixin, AbstractZARCHBuilder): def __init__(self, mc, start, num_insts=0): AbstractZARCHBuilder.__init__(self) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -849,10 +849,10 @@ self.mc.load_imm(dst, value) self.mc.AGR(dst, src_ptr) elif scale == 0: - self.mc.AGR(dst, src_ptr) + self.mc.LGR(dst, src_ptr) self.mc.AGR(dst, src_ofs) else: - self.mc.SLAG(dst, src_ofs, l.add(scale)) + self.mc.SLAG(dst, src_ofs, l.addr(scale)) self.mc.AGR(dst, src_ptr) def _emit_copycontent(self, arglocs, is_unicode): @@ -879,16 +879,18 @@ self.mc.load_imm(r.r4, length << scale) else: if scale > 0: - self.mc.sldi(r.r4.value, length_loc.value, scale) - elif length_loc is not r.r5: + self.mc.SLAG(r.r4, length_loc, l.addr(scale)) + elif length_loc is not r.r4: self.mc.LGR(r.r4, length_loc) self.mc.LGR(r.r3, r.r0) self.mc.AGHI(r.r3, l.imm(basesize)) self.mc.AGHI(r.r2, l.imm(basesize)) + self.mc.alloc_std_frame() self.mc.load_imm(self.mc.RAW_CALL_REG, self.memcpy_addr) self.mc.raw_call() + self.mc.restore_std_frame() class ForceOpAssembler(object): diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1010,8 +1010,8 @@ return locs def prepare_copystrcontent(self, op): - src_ptr_loc = self.ensure_reg(op.getarg(0)) - dst_ptr_loc = self.ensure_reg(op.getarg(1)) + src_ptr_loc = self.ensure_reg(op.getarg(0), force_in_reg=True) + dst_ptr_loc = self.ensure_reg(op.getarg(1), force_in_reg=True) src_ofs_loc = self.ensure_reg_or_any_imm(op.getarg(2)) dst_ofs_loc = self.ensure_reg_or_any_imm(op.getarg(3)) length_loc = self.ensure_reg_or_any_imm(op.getarg(4)) From pypy.commits at gmail.com Wed Dec 23 11:28:07 2015 From: pypy.commits at gmail.com (mattip) Date: Wed, 23 Dec 2015 08:28:07 -0800 (PST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <567acb97.8a75c20a.13c37.ffffa2a0@mx.google.com> Author: mattip Branch: Changeset: r81437:4b029a1241a1 Date: 2015-12-23 18:26 +0200 http://bitbucket.org/pypy/pypy/changeset/4b029a1241a1/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -95,6 +95,7 @@ .. branch: cpyext-slotdefs .. branch: fix-missing-canraise +.. branch: whatsnew .. 
branch: fix-2211 From pypy.commits at gmail.com Wed Dec 23 17:24:33 2015 From: pypy.commits at gmail.com (mattip) Date: Wed, 23 Dec 2015 14:24:33 -0800 (PST) Subject: [pypy-commit] pypy default: improve .gitignore (Vincent Legoll, PR #385) Message-ID: <567b1f21.2457c20a.d9372.1ccc@mx.google.com> Author: mattip Branch: Changeset: r81439:a19ae08a813b Date: 2015-12-24 00:22 +0200 http://bitbucket.org/pypy/pypy/changeset/a19ae08a813b/ Log: improve .gitignore (Vincent Legoll, PR #385) diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -29,4 +29,4 @@ release/ !pypy/tool/release/ rpython/_cache/ -__pycache__/ +.cache/ From pypy.commits at gmail.com Wed Dec 23 17:24:31 2015 From: pypy.commits at gmail.com (mattip) Date: Wed, 23 Dec 2015 14:24:31 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: test, implement unicode hash Message-ID: <567b1f1f.8a5a1c0a.d2aae.515d@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81438:43629fab94e1 Date: 2015-12-20 20:47 +0200 http://bitbucket.org/pypy/pypy/changeset/43629fab94e1/ Log: test, implement unicode hash diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -66,6 +66,7 @@ c = PyUnicode_AsUnicode(s); c[0] = 'a'; c[1] = 0xe9; + c[2] = 0x00; c[3] = 'c'; return s; """), @@ -74,6 +75,18 @@ assert len(s) == 4 assert s == u'a�\x00c' + def test_hash(self): + module = self.import_extension('foo', [ + ("test_hash", "METH_VARARGS", + ''' + PyObject* obj = (PyTuple_GetItem(args, 0)); + long hash = ((PyUnicodeObject*)obj)->hash; + return PyLong_FromLong(hash); + ''' + ), + ]) + res = module.test_hash(u"xyz") + assert res == hash(u'xyz') class TestUnicode(BaseApiTest): @@ -575,6 +588,3 @@ api.PyUnicode_Splitlines(w_str, 0))) assert r"[u'a\n', u'b\n', u'c\n', u'd']" == space.unwrap(space.repr( api.PyUnicode_Splitlines(w_str, 1))) - - def test_hash_and_defenc(self, space, api): - assert False # XXX test newly added struct members hash and defenc diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -58,7 +58,7 @@ py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, buflen, flavor='raw', zero=True) py_uni.c_hash = -1 - #py_uni.c_defenc = lltype.nullptr(PyObject) + py_uni.c_defenc = lltype.nullptr(PyObject.TO) return py_uni def unicode_attach(space, py_obj, w_obj): @@ -66,8 +66,9 @@ py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_length = len(space.unicode_w(w_obj)) py_unicode.c_str = lltype.nullptr(rffi.CWCHARP.TO) - py_unicode.c_hash = -1 - #py_unicode.c_defenc = lltype.nullptr(PyObject) + print w_obj + py_unicode.c_hash = space.hash_w(w_obj) + py_unicode.c_defenc = lltype.nullptr(PyObject.TO) def unicode_realize(space, py_obj): """ @@ -77,6 +78,7 @@ py_uni = rffi.cast(PyUnicodeObject, py_obj) s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_length) w_obj = space.wrap(s) + py_uni.c_hash = space.hash_w(w_obj) track_reference(space, py_obj, w_obj) return w_obj From pypy.commits at gmail.com Wed Dec 23 17:24:35 2015 From: pypy.commits at gmail.com (mattip) Date: Wed, 23 Dec 2015 14:24:35 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: merge default into branch Message-ID: <567b1f23.01941c0a.aecc6.4727@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81440:d3063a8c955b Date: 2015-12-24 00:23 +0200 
http://bitbucket.org/pypy/pypy/changeset/d3063a8c955b/ Log: merge default into branch diff too long, truncating to 2000 out of 13951 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -29,4 +29,4 @@ release/ !pypy/tool/release/ rpython/_cache/ -__pycache__/ +.cache/ diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.3.1 +Version: 1.4.1 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.3.1" -__version_info__ = (1, 3, 1) +__version__ = "1.4.1" +__version_info__ = (1, 4, 1) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -73,6 +73,7 @@ self._included_ffis = [] self._windows_unicode = None self._init_once_cache = {} + self._cdef_version = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in backend.__dict__: @@ -105,6 +106,7 @@ raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: + self._cdef_version = object() self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: @@ -646,70 +648,70 @@ import os backend = ffi._backend backendlib = _load_backend_lib(backend, libname, flags) - copied_enums = [] # - def make_accessor_locked(name): + def accessor_function(name): key = 'function ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - try: - value = backendlib.load_function(BType, name) - except KeyError as e: - raise AttributeError('%s: %s' % (name, e)) - library.__dict__[name] = value + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + try: + value = backendlib.load_function(BType, name) + except KeyError as e: + raise AttributeError('%s: %s' % (name, e)) + library.__dict__[name] = value + # + def accessor_variable(name): + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + read_variable = backendlib.read_variable + write_variable = backendlib.write_variable + setattr(FFILibrary, name, property( + lambda self: read_variable(BType, name), + lambda self, value: write_variable(BType, name, value))) + # + def accessor_constant(name): + raise NotImplementedError("non-integer constant '%s' cannot be " + "accessed from a dlopen() library" % (name,)) + # + def accessor_int_constant(name): + library.__dict__[name] = ffi._parser._int_constants[name] + # + accessors = {} + accessors_version = [False] + # + def update_accessors(): + if accessors_version[0] is ffi._cdef_version: return # - key = 'variable ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - read_variable = backendlib.read_variable - write_variable = backendlib.write_variable - setattr(FFILibrary, name, property( - lambda self: read_variable(BType, 
name), - lambda self, value: write_variable(BType, name, value))) - return - # - if not copied_enums: - from . import model - error = None - for key, (tp, _) in ffi._parser._declarations.items(): - if not isinstance(tp, model.EnumType): - continue - try: - tp.check_not_partial() - except Exception as e: - error = e - continue - for enumname, enumval in zip(tp.enumerators, tp.enumvalues): - if enumname not in library.__dict__: - library.__dict__[enumname] = enumval - if error is not None: - if name in library.__dict__: - return # ignore error, about a different enum - raise error - - for key, val in ffi._parser._int_constants.items(): - if key not in library.__dict__: - library.__dict__[key] = val - - copied_enums.append(True) - if name in library.__dict__: - return - # - key = 'constant ' + name - if key in ffi._parser._declarations: - raise NotImplementedError("fetching a non-integer constant " - "after dlopen()") - # - raise AttributeError(name) + from . import model + for key, (tp, _) in ffi._parser._declarations.items(): + if not isinstance(tp, model.EnumType): + tag, name = key.split(' ', 1) + if tag == 'function': + accessors[name] = accessor_function + elif tag == 'variable': + accessors[name] = accessor_variable + elif tag == 'constant': + accessors[name] = accessor_constant + else: + for i, enumname in enumerate(tp.enumerators): + def accessor_enum(name, tp=tp, i=i): + tp.check_not_partial() + library.__dict__[name] = tp.enumvalues[i] + accessors[enumname] = accessor_enum + for name in ffi._parser._int_constants: + accessors.setdefault(name, accessor_int_constant) + accessors_version[0] = ffi._cdef_version # def make_accessor(name): with ffi._lock: if name in library.__dict__ or name in FFILibrary.__dict__: return # added by another thread while waiting for the lock - make_accessor_locked(name) + if name not in accessors: + update_accessors() + if name not in accessors: + raise AttributeError(name) + accessors[name](name) # class FFILibrary(object): def __getattr__(self, name): @@ -723,6 +725,10 @@ setattr(self, name, value) else: property.__set__(self, value) + def __dir__(self): + with ffi._lock: + update_accessors() + return accessors.keys() # if libname is not None: try: diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -73,28 +73,36 @@ lzma (PyPy3 only) liblzma -sqlite3 - libsqlite3 - -curses - libncurses + cffi dependencies from above - pyexpat libexpat1 _ssl libssl +Make sure to have these libraries (with development headers) installed +before building PyPy, otherwise the resulting binary will not contain +these modules. Furthermore, the following libraries should be present +after building PyPy, otherwise the corresponding CFFI modules are not +built (you can run or re-run `pypy/tool/release/package.py` to retry +to build them; you don't need to re-translate the whole PyPy): + +sqlite3 + libsqlite3 + +curses + libncurses + gdbm libgdbm-dev -Make sure to have these libraries (with development headers) installed before -building PyPy, otherwise the resulting binary will not contain these modules. +tk + tk-dev On Debian, this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev + libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ + tk-dev For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. 
@@ -102,6 +110,7 @@ yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel + (XXX plus the Febora version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -110,6 +119,7 @@ zypper install gcc make python-devel pkg-config \ zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ libexpat-devel libffi-devel python-curses + (XXX plus the SLES11 version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -125,11 +135,13 @@ Translate with JIT:: - pypy rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=jit Translate without JIT:: - pypy rpython/bin/rpython --opt=2 pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=2 (You can use ``python`` instead of ``pypy`` here, which will take longer but works too.) diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -130,8 +130,13 @@ More complete example --------------------- -.. note:: This example depends on pypy_execute_source_ptr which is not available - in PyPy <= 2.2.1. +.. note:: Note that we do not make use of ``extern "Python"``, the new + way to do callbacks in CFFI 1.4: this is because these examples use + the ABI mode, not the API mode, and with the ABI mode you still have + to use ``ffi.callback()``. It is work in progress to integrate + ``extern "Python"`` with the idea of embedding (and it is expected + to ultimately lead to a better way to do embedding than the one + described here, and that would work equally well on CPython and PyPy). Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -83,29 +83,27 @@ **pypy-stm requires 64-bit Linux for now.** -Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu binary here__ -(``pypy-stm-2.*.tar.bz2``, for Ubuntu 12.04-14.04). The current version -supports four "segments", which means that it will run up to four -threads in parallel. (Development recently switched to `stmgc-c8`_, -but that is not ready for trying out yet.) +Development is done in the branch `stmgc-c8`_. If you are only +interested in trying it out, please pester us until we upload a recent +prebuilt binary. The current version supports four "segments", which +means that it will run up to four threads in parallel. To build a version from sources, you first need to compile a custom -version of clang(!); we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 `` -for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for a clang-only feature that hasn't been used so heavily -in the past (without the patches, you get crashes of clang). Then get -the branch `stmgc-c7`_ of PyPy and run:: +version of gcc(!). See the instructions here: +https://bitbucket.org/pypy/stmgc/src/default/gcc-seg-gs/ +(Note that these patches are being incorporated into gcc. It is likely +that future versions of gcc will not need to be patched any more.) - rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py - PYTHONPATH=. 
./pypy-c pypy/tool/build_cffi_imports.py +Then get the branch `stmgc-c8`_ of PyPy and run:: -.. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ + cd pypy/goal + ../../rpython/bin/rpython -Ojit --stm + +At the end, this will try to compile the generated C code by calling +``gcc-seg-gs``, which must be the script you installed in the +instructions above. + .. _`stmgc-c8`: https://bitbucket.org/pypy/pypy/src/stmgc-c8/ -.. __: https://bitbucket.org/pypy/pypy/downloads/ -.. __: http://clang.llvm.org/get_started.html -.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ .. _caveats: @@ -113,6 +111,12 @@ Current status (stmgc-c7) ------------------------- +.. warning:: + + THIS PAGE IS OLD, THE REST IS ABOUT STMGC-C7 WHEREAS THE CURRENT + DEVELOPMENT WORK IS DONE ON STMGC-C8 + + * **NEW:** It seems to work fine, without crashing any more. Please `report any crash`_ you find (or other bugs). diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,6 +44,9 @@ .. branch: fix-setslice-can-resize +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + .. branch: anntype2 A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: @@ -61,3 +64,40 @@ .. branch: vecopt-absvalue - Enhancement. Removed vector fields from AbstractValue. + +.. branch: memop-simplify2 + +Simplification. Backends implement too many loading instructions, only having a slightly different interface. +Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the +commonly known loading operations + +.. branch: more-rposix + +Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and +turn them into regular RPython functions. Most RPython-compatible `os.*` +functions are now directly accessible as `rpython.rposix.*`. + +.. branch: always-enable-gil + +Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. + +.. branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + +.. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. + +.. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + +.. branch: cpyext-slotdefs +.. branch: fix-missing-canraise +.. branch: whatsnew + +.. branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. 
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -289,6 +289,8 @@ for w_item in space.fixedview(obj): result_w.append(self._make_key(w_item)) w_key = space.newtuple(result_w[:]) + elif isinstance(obj, PyCode): + w_key = space.newtuple([obj, w_type, space.id(obj)]) else: w_key = space.newtuple([obj, w_type]) return w_key diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -931,6 +931,11 @@ finally: space.call_function(w_set_debug, space.w_True) + def test_dont_fold_equal_code_objects(self): + yield self.st, "f=lambda:1;g=lambda:1.0;x=g()", 'type(x)', float + yield (self.st, "x=(lambda: (-0.0, 0.0), lambda: (0.0, -0.0))[1]()", + 'repr(x)', '(0.0, -0.0)') + class AppTestCompiler: diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload, clibffi -VERSION = "1.3.1" +VERSION = "1.4.2" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py --- a/pypy/module/_cffi_backend/call_python.py +++ b/pypy/module/_cffi_backend/call_python.py @@ -40,10 +40,9 @@ at least 8 bytes in size. """ from pypy.module._cffi_backend.ccallback import reveal_callback + from rpython.rlib import rgil - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -71,9 +70,7 @@ cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) rffi.stackcounter.stacks_counter -= 1 - before = rffi.aroundstate.before - if before: - before() + rgil.release() def get_ll_cffi_call_python(): diff --git a/pypy/module/_cffi_backend/lib_obj.py b/pypy/module/_cffi_backend/lib_obj.py --- a/pypy/module/_cffi_backend/lib_obj.py +++ b/pypy/module/_cffi_backend/lib_obj.py @@ -194,6 +194,8 @@ return self.dir1(ignore_global_vars=True) if is_getattr and attr == '__dict__': return self.full_dict_copy() + if is_getattr and attr == '__class__': + return self.space.type(self) if is_getattr and attr == '__name__': return self.descr_repr() raise oefmt(self.space.w_AttributeError, diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.3.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -12,6 +12,7 @@ p = ffi.new("int *") p[0] = -42 assert p[0] == -42 + assert type(ffi) is ffi.__class__ is _cffi1_backend.FFI def 
test_ffi_subclass(self): import _cffi_backend as _cffi1_backend @@ -22,6 +23,7 @@ assert foo.x == 42 p = foo.new("int *") assert p[0] == 0 + assert type(foo) is foo.__class__ is FOO def test_ffi_no_argument(self): import _cffi_backend as _cffi1_backend diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -16,8 +16,8 @@ from cffi import ffiplatform except ImportError: py.test.skip("system cffi module not found or older than 1.0.0") - if cffi.__version_info__ < (1, 3, 0): - py.test.skip("system cffi module needs to be at least 1.3.0") + if cffi.__version_info__ < (1, 4, 0): + py.test.skip("system cffi module needs to be at least 1.4.0") space.appexec([], """(): import _cffi_backend # force it to be initialized """) @@ -1029,6 +1029,7 @@ assert hasattr(lib, '__dict__') assert lib.__all__ == ['MYFOO', 'mybar'] # but not 'myvar' assert lib.__name__ == repr(lib) + assert lib.__class__ is type(lib) def test_macro_var_callback(self): ffi, lib = self.prepare( diff --git a/pypy/module/_file/test/test_large_file.py b/pypy/module/_file/test/test_large_file.py --- a/pypy/module/_file/test/test_large_file.py +++ b/pypy/module/_file/test/test_large_file.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.module._file.test.test_file import getfile @@ -13,6 +13,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): from rpython.translator.c.test.test_extfunc import need_sparse_files + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_large_seek_offsets(self): diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -7,7 +7,9 @@ from pypy.module.exceptions.interp_exceptions import W_IOError from pypy.module._io.interp_fileio import W_FileIO from pypy.module._io.interp_textio import W_TextIOWrapper -from rpython.rtyper.module.ll_os_stat import STAT_FIELD_TYPES +from rpython.rlib.rposix_stat import STAT_FIELD_TYPES + +HAS_BLKSIZE = 'st_blksize' in STAT_FIELD_TYPES class Cache: @@ -118,7 +120,7 @@ if buffering < 0: buffering = DEFAULT_BUFFER_SIZE - if 'st_blksize' in STAT_FIELD_TYPES: + if HAS_BLKSIZE: fileno = space.c_int_w(space.call_method(w_raw, "fileno")) try: st = os.fstat(fileno) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -251,7 +251,7 @@ from pypy.module._socket.interp_socket import addr_as_object if not hasattr(rsocket._c, 'sockaddr_ll'): py.test.skip("posix specific test") - # HACK: To get the correct interface numer of lo, which in most cases is 1, + # HACK: To get the correct interface number of lo, which in most cases is 1, # but can be anything (i.e. 
39), we need to call the libc function # if_nametoindex to get the correct index import ctypes @@ -513,7 +513,7 @@ def test_getsetsockopt(self): import _socket as socket import struct - # A socket sould start with reuse == 0 + # A socket should start with reuse == 0 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse == 0 @@ -627,6 +627,26 @@ self.foo = _socket.socket() +class AppTestNetlink: + def setup_class(cls): + if not hasattr(os, 'getpid'): + py.test.skip("AF_NETLINK needs os.getpid()") + w_ok = space.appexec([], "(): import _socket; " + + "return hasattr(_socket, 'AF_NETLINK')") + if not space.is_true(w_ok): + py.test.skip("no AF_NETLINK on this platform") + cls.space = space + + def test_connect_to_kernel_netlink_routing_socket(self): + import _socket, os + s = _socket.socket(_socket.AF_NETLINK, _socket.SOCK_DGRAM, _socket.NETLINK_ROUTE) + assert s.getsockname() == (0L, 0L) + s.bind((0, 0)) + a, b = s.getsockname() + assert a == os.getpid() + assert b == 0 + + class AppTestPacket: def setup_class(cls): if not hasattr(os, 'getuid') or os.getuid() != 0: diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -124,7 +124,7 @@ METH_COEXIST METH_STATIC METH_CLASS METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) @@ -601,6 +601,7 @@ # Make the wrapper for the cases (1) and (2) def make_wrapper(space, callable, gil=None): "NOT_RPYTHON" + from rpython.rlib import rgil names = callable.api_func.argnames argtypes_enum_ui = unrolling_iterable(enumerate(zip(callable.api_func.argtypes, [name.startswith("w_") for name in names]))) @@ -616,9 +617,7 @@ # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer if gil_acquire: - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -691,9 +690,7 @@ pypy_debug_catch_fatal_exception() rffi.stackcounter.stacks_counter -= 1 if gil_release: - before = rffi.aroundstate.before - if before: - before() + rgil.release() return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,14 +4,14 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, readbufferproc) -from pypy.module.cpyext.pyobject import from_ref +from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from 
pypy.interpreter.error import OperationError, oefmt @@ -65,22 +65,24 @@ func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, w_self, args_w[0]) def wrap_binaryfunc_r(space, w_self, w_args, func): func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, args_w[0], w_self) def wrap_inquirypred(space, w_self, w_args, func): @@ -378,6 +380,17 @@ space.call_function(delattr_fn, w_self, w_name) return 0 api_func = slot_tp_setattro.api_func + elif name == 'tp_getattro': + getattr_fn = w_type.getdictvalue(space, '__getattribute__') + if getattr_fn is None: + return + + @cpython_api([PyObject, PyObject], PyObject, + error=lltype.nullptr(rffi.VOIDP.TO), external=True) + @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) + def slot_tp_getattro(space, w_self, w_name): + return space.call_function(getattr_fn, w_self, w_name) + api_func = slot_tp_getattro.api_func else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -387,12 +387,53 @@ PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); return NULL; } + if (!args->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + if (args->ob_type->tp_getattro == + args->ob_type->tp_base->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "recursive tp_getattro"); + return NULL; + } Py_RETURN_TRUE; ''' ) ]) assert module.test_type(type(None)) + def test_tp_getattro(self): + module = self.import_extension('foo', [ + ("test_tp_getattro", "METH_VARARGS", + ''' + PyObject *obj = PyTuple_GET_ITEM(args, 0); + PyIntObject *value = PyTuple_GET_ITEM(args, 1); + if (!obj->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + PyObject *name = PyString_FromString("attr1"); + PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); + if (attr1->ob_ival != value->ob_ival) + { + PyErr_SetString(PyExc_ValueError, + "tp_getattro returned wrong value"); + return NULL; + } + Py_DECREF(name); + Py_DECREF(attr1); + Py_RETURN_TRUE; + ''' + ) + ]) + class C: + def __init__(self): + self.attr1 = 123 + assert module.test_tp_getattro(C(), 123) + def test_nb_int(self): module = self.import_extension('foo', [ ("nb_int", "METH_O", @@ -601,45 +642,92 @@ if self.runappdirect: py.test.xfail('segfault') module = self.import_extension('foo', [ - ("new_obj", "METH_NOARGS", + ("newInt", "METH_VARARGS", """ - FooObject *fooObj; + IntLikeObject *intObj; + long intval; - Foo_Type.tp_as_number = &foo_as_number; - foo_as_number.nb_add = foo_nb_add_call; - if (PyType_Ready(&Foo_Type) < 0) return NULL; - 
fooObj = PyObject_New(FooObject, &Foo_Type); - if (!fooObj) { + if (!PyArg_ParseTuple(args, "i", &intval)) + return NULL; + + IntLike_Type.tp_as_number = &intlike_as_number; + IntLike_Type.tp_flags |= Py_TPFLAGS_CHECKTYPES; + intlike_as_number.nb_add = intlike_nb_add; + if (PyType_Ready(&IntLike_Type) < 0) return NULL; + intObj = PyObject_New(IntLikeObject, &IntLike_Type); + if (!intObj) { return NULL; } - return (PyObject *)fooObj; + intObj->ival = intval; + return (PyObject *)intObj; + """), + ("newIntNoOp", "METH_VARARGS", + """ + IntLikeObjectNoOp *intObjNoOp; + long intval; + + if (!PyArg_ParseTuple(args, "i", &intval)) + return NULL; + + IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; + if (PyType_Ready(&IntLike_Type_NoOp) < 0) return NULL; + intObjNoOp = PyObject_New(IntLikeObjectNoOp, &IntLike_Type_NoOp); + if (!intObjNoOp) { + return NULL; + } + + intObjNoOp->ival = intval; + return (PyObject *)intObjNoOp; """)], """ typedef struct { PyObject_HEAD - } FooObject; + long ival; + } IntLikeObject; static PyObject * - foo_nb_add_call(PyObject *self, PyObject *other) + intlike_nb_add(PyObject *self, PyObject *other) { - return PyInt_FromLong(42); + long val1 = ((IntLikeObject *)(self))->ival; + if (PyInt_Check(other)) { + long val2 = PyInt_AsLong(other); + return PyInt_FromLong(val1+val2); + } + + long val2 = ((IntLikeObject *)(other))->ival; + return PyInt_FromLong(val1+val2); } - PyTypeObject Foo_Type = { + PyTypeObject IntLike_Type = { PyObject_HEAD_INIT(0) /*ob_size*/ 0, - /*tp_name*/ "Foo", - /*tp_basicsize*/ sizeof(FooObject), + /*tp_name*/ "IntLike", + /*tp_basicsize*/ sizeof(IntLikeObject), }; - static PyNumberMethods foo_as_number; + static PyNumberMethods intlike_as_number; + + typedef struct + { + PyObject_HEAD + long ival; + } IntLikeObjectNoOp; + + PyTypeObject IntLike_Type_NoOp = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "IntLikeNoOp", + /*tp_basicsize*/ sizeof(IntLikeObjectNoOp), + }; """) - a = module.new_obj() - b = module.new_obj() + a = module.newInt(1) + b = module.newInt(2) c = 3 - assert (a + b) == 42 - raises(TypeError, "b + c") + d = module.newIntNoOp(4) + assert (a + b) == 3 + assert (b + c) == 5 + assert (d + a) == 5 def test_tp_new_in_subclass_of_type(self): skip("BROKEN") diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -582,6 +582,8 @@ pto.c_tp_free = base.c_tp_free if not pto.c_tp_setattro: pto.c_tp_setattro = base.c_tp_setattro + if not pto.c_tp_getattro: + pto.c_tp_getattro = base.c_tp_getattro finally: Py_DecRef(space, base_pyo) @@ -652,6 +654,12 @@ PyObject_GenericSetAttr.api_func.functype, PyObject_GenericSetAttr.api_func.get_wrapper(space)) + if not pto.c_tp_getattro: + from pypy.module.cpyext.object import PyObject_GenericGetAttr + pto.c_tp_getattro = llhelper( + PyObject_GenericGetAttr.api_func.functype, + PyObject_GenericGetAttr.api_func.get_wrapper(space)) + if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) w_dict = w_obj.getdict(space) diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -1,6 +1,6 @@ from __future__ import with_statement from rpython.tool.udir import udir -import os +import os, sys, py class AppTestMMap: spaceconfig = dict(usemodules=('mmap',)) @@ -8,6 +8,15 @@ def setup_class(cls): cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-'))) + def setup_method(self, meth): + if 
getattr(meth, 'is_large', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") + def test_page_size(self): import mmap assert mmap.PAGESIZE > 0 @@ -648,6 +657,7 @@ assert m[0xFFFFFFF] == b'A' finally: m.close() + test_large_offset.is_large = True def test_large_filesize(self): import mmap @@ -665,6 +675,7 @@ assert m.size() == 0x180000000 finally: m.close() + test_large_filesize.is_large = True def test_all(self): # this is a global test, ported from test_mmap.py diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -1,5 +1,5 @@ from pypy.interpreter.mixedmodule import MixedModule -from rpython.rtyper.module.ll_os import RegisterOs +from rpython.rlib import rposix import os exec 'import %s as posix' % os.name @@ -172,7 +172,7 @@ if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - for name in RegisterOs.w_star: + for name in rposix.WAIT_MACROS: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' + name diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,12 +1,11 @@ import os import sys -from rpython.rlib import rposix, objectmodel, rurandom +from rpython.rlib import rposix, rposix_stat +from rpython.rlib import objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.module import ll_os_stat -from rpython.rtyper.module.ll_os import RegisterOs from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 @@ -43,6 +42,8 @@ return space.str0_w(w_obj) class FileEncoder(object): + is_unicode = True + def __init__(self, space, w_obj): self.space = space self.w_obj = w_obj @@ -54,6 +55,8 @@ return self.space.unicode0_w(self.w_obj) class FileDecoder(object): + is_unicode = False + def __init__(self, space, w_obj): self.space = space self.w_obj = w_obj @@ -212,13 +215,13 @@ # ____________________________________________________________ -STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) +STAT_FIELDS = unrolling_iterable(enumerate(rposix_stat.STAT_FIELDS)) -STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(rposix_stat.STATVFS_FIELDS)) def build_stat_result(space, st): FIELDS = STAT_FIELDS # also when not translating at all - lst = [None] * ll_os_stat.N_INDEXABLE_FIELDS + lst = [None] * rposix_stat.N_INDEXABLE_FIELDS w_keywords = space.newdict() stat_float_times = space.fromcache(StatState).stat_float_times for i, (name, TYPE) in FIELDS: @@ -226,7 +229,7 @@ if name in ('st_atime', 'st_mtime', 'st_ctime'): value = int(value) # rounded to an integer for indexed access w_value = space.wrap(value) - if i < ll_os_stat.N_INDEXABLE_FIELDS: + if i < rposix_stat.N_INDEXABLE_FIELDS: lst[i] = w_value else: space.setitem(w_keywords, space.wrap(name), w_value) @@ -254,7 +257,7 @@ def build_statvfs_result(space, st): - vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + vals_w = [None] * 
len(rposix_stat.STATVFS_FIELDS) for i, (name, _) in STATVFS_FIELDS: vals_w[i] = space.wrap(getattr(st, name)) w_tuple = space.newtuple(vals_w) @@ -267,7 +270,7 @@ """Perform a stat system call on the file referenced to by an open file descriptor.""" try: - st = os.fstat(fd) + st = rposix_stat.fstat(fd) except OSError, e: raise wrap_oserror(space, e) else: @@ -289,7 +292,7 @@ """ try: - st = dispatch_filename(rposix.stat)(space, w_path) + st = dispatch_filename(rposix_stat.stat)(space, w_path) except OSError, e: raise wrap_oserror2(space, e, w_path) else: @@ -298,7 +301,7 @@ def lstat(space, w_path): "Like stat(path), but do no follow symbolic links." try: - st = dispatch_filename(rposix.lstat)(space, w_path) + st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: raise wrap_oserror2(space, e, w_path) else: @@ -327,7 +330,7 @@ @unwrap_spec(fd=c_int) def fstatvfs(space, fd): try: - st = os.fstatvfs(fd) + st = rposix_stat.fstatvfs(fd) except OSError as e: raise wrap_oserror(space, e) else: @@ -336,7 +339,7 @@ def statvfs(space, w_path): try: - st = dispatch_filename(rposix.statvfs)(space, w_path) + st = dispatch_filename(rposix_stat.statvfs)(space, w_path) except OSError as e: raise wrap_oserror2(space, e, w_path) else: @@ -427,11 +430,11 @@ try: if space.isinstance_w(w_path, space.w_unicode): path = FileEncoder(space, w_path) - fullpath = rposix._getfullpathname(path) + fullpath = rposix.getfullpathname(path) w_fullpath = space.wrap(fullpath) else: path = space.str0_w(w_path) - fullpath = rposix._getfullpathname(path) + fullpath = rposix.getfullpathname(path) w_fullpath = space.wrap(fullpath) except OSError, e: raise wrap_oserror2(space, e, w_path) @@ -661,7 +664,7 @@ def kill(space, pid, sig): "Kill a process with a signal." try: - rposix.os_kill(pid, sig) + rposix.kill(pid, sig) except OSError, e: raise wrap_oserror(space, e) @@ -677,7 +680,7 @@ """Abort the interpreter immediately. 
This 'dumps core' or otherwise fails in the hardest way possible on the hosting operating system.""" import signal - rposix.os_kill(os.getpid(), signal.SIGABRT) + rposix.kill(os.getpid(), signal.SIGABRT) @unwrap_spec(src='str0', dst='str0') def link(space, src, dst): @@ -1199,7 +1202,7 @@ raise wrap_oserror(space, e) def declare_new_w_star(name): - if name in RegisterOs.w_star_returning_int: + if name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG'): @unwrap_spec(status=c_int) def WSTAR(space, status): return space.wrap(getattr(os, name)(status)) @@ -1211,7 +1214,7 @@ WSTAR.func_name = name return WSTAR -for name in RegisterOs.w_star: +for name in rposix.WAIT_MACROS: if hasattr(os, name): func = declare_new_w_star(name) globals()[name] = func diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -6,8 +6,8 @@ from rpython.tool.udir import udir from pypy.tool.pytest.objspace import gettestobjspace from pypy.conftest import pypydir -from rpython.rtyper.module.ll_os import RegisterOs from rpython.translator.c.test.test_extfunc import need_sparse_files +from rpython.rlib import rposix import os import py import sys @@ -93,6 +93,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_posix_is_pypy_s(self): @@ -576,7 +582,7 @@ raises(TypeError, "os.utime('xxx', 3)") raises(OSError, "os.utime('somefilewhichihopewouldneverappearhere', None)") - for name in RegisterOs.w_star: + for name in rposix.WAIT_MACROS: if hasattr(os, name): values = [0, 1, 127, 128, 255] code = py.code.Source(""" diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py --- a/pypy/module/signal/__init__.py +++ b/pypy/module/signal/__init__.py @@ -48,3 +48,6 @@ use_bytecode_counter=False) space.actionflag.__class__ = interp_signal.SignalActionFlag # xxx yes I know the previous line is a hack + + def startup(self, space): + space.check_signal_action.startup(space) diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -63,19 +63,25 @@ AsyncAction.__init__(self, space) self.pending_signal = -1 self.fire_in_another_thread = False - if self.space.config.objspace.usemodules.thread: - from pypy.module.thread import gil - gil.after_thread_switch = self._after_thread_switch + # + @rgc.no_collect + def _after_thread_switch(): + if self.fire_in_another_thread: + if self.space.threadlocals.signals_enabled(): + self.fire_in_another_thread = False + self.space.actionflag.rearm_ticker() + # this occurs when we just switched to the main thread + # and there is a signal pending: we force the ticker to + # -1, which should ensure perform() is called quickly. 
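# Editorial sketch (not from the diff) of the pattern being introduced here:
# the hook becomes a local closure over the state it needs, so the function
# object itself is a prebuilt constant that can be handed once at startup to
# rgil.invoke_after_thread_switch().  'SignalState' and 'make_hook' are
# illustrative names only.
class SignalState(object):
    fire_in_another_thread = False

def make_hook(state):
    def _after_thread_switch():
        if state.fire_in_another_thread:
            state.fire_in_another_thread = False   # a signal is pending: handle it
    return _after_thread_switch

# at startup, once:
#     from rpython.rlib import rgil
#     rgil.invoke_after_thread_switch(make_hook(SignalState()))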
+ self._after_thread_switch = _after_thread_switch + # ^^^ so that 'self._after_thread_switch' can be annotated as a + # constant - @rgc.no_collect - def _after_thread_switch(self): - if self.fire_in_another_thread: - if self.space.threadlocals.signals_enabled(): - self.fire_in_another_thread = False - self.space.actionflag.rearm_ticker() - # this occurs when we just switched to the main thread - # and there is a signal pending: we force the ticker to - # -1, which should ensure perform() is called quickly. + def startup(self, space): + # this is translated + if space.config.objspace.usemodules.thread: + from rpython.rlib import rgil + rgil.invoke_after_thread_switch(self._after_thread_switch) def perform(self, executioncontext, frame): self._poll_for_signals() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 4): - py.test.skip("re-enable me in version 1.4") + if __version_info__ < (1, 5): + py.test.skip("re-enable me in version 1.5") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py @@ -465,10 +465,22 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("double __stdcall sin(double x);") # stdcall ignored m = ffi.dlopen(lib_m) - if (sys.platform == 'win32' and sys.maxsize < 2**32 and + if (sys.platform == 'win32' and sys.maxsize < 2**32 and self.Backend is not CTypesBackend): assert "double(__stdcall *)(double)" in str(ffi.typeof(m.sin)) else: assert "double(*)(double)" in str(ffi.typeof(m.sin)) x = m.sin(1.23) assert x == math.sin(1.23) + + def test_dir_on_dlopen_lib(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + typedef enum { MYE1, MYE2 } myenum_t; + double myfunc(double); + double myvar; + const double myconst; + #define MYFOO 42 + """) + m = ffi.dlopen(lib_m) + assert dir(m) == ['MYE1', 'MYE2', 'MYFOO', 'myconst', 'myfunc', 'myvar'] diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -8,6 +8,7 @@ p = ffi.new("int *") p[0] = -42 assert p[0] == -42 + assert type(ffi) is ffi.__class__ is _cffi1_backend.FFI def test_ffi_subclass(): class FOO(_cffi1_backend.FFI): @@ -17,6 +18,7 @@ assert foo.x == 42 p = foo.new("int *") assert p[0] == 0 + assert type(foo) is foo.__class__ is FOO def test_ffi_no_argument(): py.test.raises(TypeError, _cffi1_backend.FFI, 42) @@ -472,7 +474,11 @@ assert seen == [1] * (i + 1) def test_init_once_multithread_failure(): - import thread, time + if sys.version_info < (3,): + import thread + else: + import _thread as thread + import time def do_init(): seen.append('init!') time.sleep(1) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- 
a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1157,6 +1157,7 @@ assert hasattr(lib, '__dict__') assert lib.__all__ == ['MYFOO', 'mybar'] # but not 'myvar' assert lib.__name__ == repr(lib) + assert lib.__class__ is type(lib) def test_macro_var_callback(): ffi = FFI() diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py --- a/pypy/module/thread/gil.py +++ b/pypy/module/thread/gil.py @@ -11,7 +11,6 @@ from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import PeriodicAsyncAction from pypy.module.thread.threadlocals import OSThreadLocals -from rpython.rlib.objectmodel import invoke_around_extcall class GILThreadLocals(OSThreadLocals): """A version of OSThreadLocals that enforces a GIL.""" @@ -23,34 +22,21 @@ space.actionflag.register_periodic_action(GILReleaseAction(space), use_bytecode_counter=True) - def _initialize_gil(self, space): - rgil.gil_allocate() - def setup_threads(self, space): """Enable threads in the object space, if they haven't already been.""" if not self.gil_ready: - self._initialize_gil(space) + # Note: this is a quasi-immutable read by module/pypyjit/interp_jit + # It must be changed (to True) only if it was really False before + rgil.allocate() self.gil_ready = True result = True else: result = False # already set up - - # add the GIL-releasing callback around external function calls. - # - # XXX we assume a single space, but this is not quite true during - # testing; for example, if you run the whole of test_lock you get - # a deadlock caused by the first test's space being reused by - # test_lock_again after the global state was cleared by - # test_compile_lock. As a workaround, we repatch these global - # fields systematically. - invoke_around_extcall(before_external_call, after_external_call) return result - def reinit_threads(self, space): - "Called in the child process after a fork()" - OSThreadLocals.reinit_threads(self, space) - if self.gil_ready: # re-initialize the gil if needed - self._initialize_gil(space) + ## def reinit_threads(self, space): + ## "Called in the child process after a fork()" + ## OSThreadLocals.reinit_threads(self, space) class GILReleaseAction(PeriodicAsyncAction): @@ -59,43 +45,4 @@ """ def perform(self, executioncontext, frame): - do_yield_thread() - - -after_thread_switch = lambda: None # hook for signal.py - -def before_external_call(): - # this function must not raise, in such a way that the exception - # transformer knows that it cannot raise! - rgil.gil_release() -before_external_call._gctransformer_hint_cannot_collect_ = True -before_external_call._dont_reach_me_in_del_ = True - -def after_external_call(): - rgil.gil_acquire() - rthread.gc_thread_run() - after_thread_switch() -after_external_call._gctransformer_hint_cannot_collect_ = True -after_external_call._dont_reach_me_in_del_ = True - -# The _gctransformer_hint_cannot_collect_ hack is needed for -# translations in which the *_external_call() functions are not inlined. -# They tell the gctransformer not to save and restore the local GC -# pointers in the shadow stack. This is necessary because the GIL is -# not held after the call to before_external_call() or before the call -# to after_external_call(). - -def do_yield_thread(): - # explicitly release the gil, in a way that tries to give more - # priority to other threads (as opposed to continuing to run in - # the same thread). 
- if rgil.gil_yield_thread(): - rthread.gc_thread_run() - after_thread_switch() -do_yield_thread._gctransformer_hint_close_stack_ = True -do_yield_thread._dont_reach_me_in_del_ = True -do_yield_thread._dont_inline_ = True - -# do_yield_thread() needs a different hint: _gctransformer_hint_close_stack_. -# The *_external_call() functions are themselves called only from the rffi -# module from a helper function that also has this hint. + rgil.yield_thread() diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -5,7 +5,7 @@ import errno from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.module.thread import gil +from rpython.rlib import rgil NORMAL_TIMEOUT = 300.0 # 5 minutes @@ -15,9 +15,9 @@ adaptivedelay = 0.04 limit = time.time() + delay * NORMAL_TIMEOUT while time.time() <= limit: - gil.before_external_call() + rgil.release() time.sleep(adaptivedelay) - gil.after_external_call() + rgil.acquire() gc.collect() if space.is_true(space.call_function(w_condition)): return diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -1,5 +1,6 @@ import time from pypy.module.thread import gil +from rpython.rlib import rgil from rpython.rlib.test import test_rthread from rpython.rlib import rthread as thread from rpython.rlib.objectmodel import we_are_translated @@ -55,7 +56,7 @@ assert state.datalen3 == len(state.data) assert state.datalen4 == len(state.data) debug_print(main, i, state.datalen4) - gil.do_yield_thread() + rgil.yield_thread() assert i == j j += 1 def bootstrap(): @@ -82,9 +83,9 @@ if not still_waiting: raise ValueError("time out") still_waiting -= 1 - if not we_are_translated(): gil.before_external_call() + if not we_are_translated(): rgil.release() time.sleep(0.01) - if not we_are_translated(): gil.after_external_call() + if not we_are_translated(): rgil.acquire() debug_print("leaving!") i1 = i2 = 0 for tid, i in state.data: diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -482,13 +482,6 @@ secs = pytime.time() return space.wrap(secs) -if _WIN: - class PCCache: - pass - pccache = PCCache() - pccache.divisor = 0.0 - pccache.ctrStart = 0 - def clock(space): """clock() -> floating point number diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -546,7 +546,6 @@ def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): - # XXX consider to extend rlist's functionality? 
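# Editorial sketch (not part of the commits above) of the new convention that
# support.py and test_gil.py now follow: code that blocks without touching
# RPython objects simply brackets the blocking call with the module-level rgil
# functions instead of the removed gil.before/after_external_call helpers.
import time
from rpython.rlib import rgil

def sleep_without_gil(seconds):
    rgil.release()          # let other threads run while we block
    time.sleep(seconds)     # any call that needs no RPython objects
    rgil.acquire()          # take the GIL back before touching them again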
length = self.length() start, stop, step, slicelength = w_index.indices4(space, length) assert slicelength >= 0 diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -353,8 +353,9 @@ # * a user-defined bound or unbound method object # * a frozen pre-built constant (with _freeze_() == True) # * a bound method of a frozen pre-built constant + obj_key = Constant(pyobj) try: - return self.descs[pyobj] + return self.descs[obj_key] except KeyError: if isinstance(pyobj, types.FunctionType): result = description.FunctionDesc(self, pyobj) @@ -399,7 +400,7 @@ msg = "unexpected prebuilt constant" raise Exception("%s: %r" % (msg, pyobj)) result = self.getfrozen(pyobj) - self.descs[pyobj] = result + self.descs[obj_key] = result return result def getfrozen(self, pyobj): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3516,6 +3516,32 @@ s = a.build_types(f, [unicode]) assert isinstance(s, annmodel.SomeUnicodeString) + def test_extended_slice(self): + a = self.RPythonAnnotator() + def f(start, end, step): + return [1, 2, 3][start:end:step] + with py.test.raises(AnnotatorError): + a.build_types(f, [int, int, int]) + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True)]) + def f(x): + return x[::-1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[::2] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[1:2:1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) def test_negative_slice(self): def f(s, e): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -441,7 +441,7 @@ def dict_contains(s_dct, s_element, position): s_dct.dictdef.generalize_key(s_element) if s_dct._is_empty(position): - s_bool =SomeBool() + s_bool = SomeBool() s_bool.const = False return s_bool return s_Bool @@ -686,7 +686,7 @@ enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): raise AnnotatorError("Encoding %s not supported for unicode" % (enc,)) - return SomeString() + return SomeString(no_nul=self.no_nul) method_encode.can_only_throw = [UnicodeEncodeError] @@ -719,7 +719,7 @@ enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): raise AnnotatorError("Encoding %s not supported for strings" % (enc,)) - return SomeUnicodeString() + return SomeUnicodeString(no_nul=self.no_nul) method_decode.can_only_throw = [UnicodeDecodeError] class __extend__(SomeChar, SomeUnicodeCodePoint): diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -23,7 +23,7 @@ if func.func_code.co_cellvars: raise ValueError( """RPython functions cannot create closures -Possible casues: +Possible causes: Function is inner function Function uses generator expressions Lambda expressions diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -1,5 +1,5 @@ """ -This module defines all the SpaceOeprations used 
in rpython.flowspace. +This module defines all the SpaceOperations used in rpython.flowspace. """ import __builtin__ @@ -196,21 +196,6 @@ return cls._dispatch(type(s_arg)) @classmethod - def get_specialization(cls, s_arg, *_ignored): - try: - impl = getattr(s_arg, cls.opname) - - def specialized(annotator, arg, *other_args): - return impl(*[annotator.annotation(x) for x in other_args]) - try: - specialized.can_only_throw = impl.can_only_throw - except AttributeError: - pass - return specialized - except AttributeError: - return cls._dispatch(type(s_arg)) - - @classmethod def register_transform(cls, Some_cls): def decorator(func): cls._transform[Some_cls] = func @@ -523,6 +508,14 @@ *[annotator.annotation(arg) for arg in self.args]) +class NewSlice(HLOperation): + opname = 'newslice' + canraise = [] + + def consider(self, annotator): + raise AnnotatorError("Cannot use extended slicing in rpython") + + class Pow(PureOperation): opname = 'pow' arity = 3 diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -19,7 +19,6 @@ from rpython.jit.backend.arm.locations import imm, RawSPStackLocation from rpython.jit.backend.llsupport import symbolic from rpython.jit.backend.llsupport.gcmap import allocate_gcmap -from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.history import (AbstractFailDescr, ConstInt, @@ -655,31 +654,24 @@ pmc.B_offs(offset, c.EQ) return fcond - def emit_op_setfield_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs, size = arglocs - scale = get_scale(size.value) - self._write_to_mem(value_loc, base_loc, - ofs, imm(scale), fcond) + def emit_op_gc_store(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, ofs_loc, size_loc = arglocs + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, ofs_loc, imm(scale), fcond) return fcond - emit_op_setfield_raw = emit_op_setfield_gc - emit_op_zero_ptr_field = emit_op_setfield_gc - - def _genop_getfield(self, op, arglocs, regalloc, fcond): - base_loc, ofs, res, size = arglocs - signed = op.getdescr().is_field_signed() - scale = get_scale(size.value) - self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond) + def _emit_op_gc_load(self, op, arglocs, regalloc, fcond): + base_loc, ofs_loc, res_loc, nsize_loc = arglocs + nsize = nsize_loc.value + signed = (nsize < 0) + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, ofs_loc, imm(scale), + signed, fcond) return fcond - emit_op_getfield_gc_i = _genop_getfield - emit_op_getfield_gc_r = _genop_getfield - emit_op_getfield_gc_f = _genop_getfield - emit_op_getfield_gc_pure_i = _genop_getfield - emit_op_getfield_gc_pure_r = _genop_getfield - emit_op_getfield_gc_pure_f = _genop_getfield - emit_op_getfield_raw_i = _genop_getfield - emit_op_getfield_raw_f = _genop_getfield + emit_op_gc_load_i = _emit_op_gc_load + emit_op_gc_load_r = _emit_op_gc_load + emit_op_gc_load_f = _emit_op_gc_load def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): base_loc, value_loc = arglocs @@ -688,68 +680,21 @@ self.mc.STR_ri(value_loc.value, base_loc.value, 0, cond=fcond) return fcond - def _genop_getinteriorfield(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, res_loc, - ofs_loc, ofs, itemsize, fieldsize) = 
arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - descr = op.getdescr() - assert isinstance(descr, InteriorFieldDescr) - signed = descr.fielddescr.is_field_signed() - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - ofs_loc = tmploc - self._load_from_mem(res_loc, base_loc, ofs_loc, - imm(scale), signed, fcond) - return fcond - - emit_op_getinteriorfield_gc_i = _genop_getinteriorfield - emit_op_getinteriorfield_gc_r = _genop_getinteriorfield - emit_op_getinteriorfield_gc_f = _genop_getinteriorfield - - def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, value_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, index_loc, value_loc, ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - self._write_to_mem(value_loc, base_loc, tmploc, imm(scale), fcond) - return fcond - emit_op_setinteriorfield_raw = emit_op_setinteriorfield_gc - - def emit_op_arraylen_gc(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs = arglocs - self.load_reg(self.mc, res, base_loc, ofs.value) - return fcond - - def emit_op_setarrayitem_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - + def emit_op_gc_store_indexed(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, index_loc, imm(scale), fcond) return fcond def _write_to_mem(self, value_loc, base_loc, ofs_loc, scale, fcond=c.AL): + # Write a value of size '1 << scale' at the address + # 'base_ofs + ofs_loc'. Note that 'scale' is not used to scale + # the offset! 
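# Editorial worked example of the convention stated in the comment above:
# 'scale' only encodes the item size as 1 << scale; the effective address is
# plainly base + offset, the offset itself is never shifted by 'scale'.
def item_size(scale):
    return 1 << scale                   # scale 0,1,2,3 -> 1,2,4,8 bytes

def effective_address(base, offset):
    return base + offset                # no scaling applied to the offset

assert item_size(3) == 8                # an 8-byte (vfp/double-word) access
assert effective_address(0x1000, 24) == 0x1018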
if scale.value == 3: assert value_loc.is_vfp_reg() # vstr only supports imm offsets @@ -789,43 +734,31 @@ else: assert 0 - emit_op_setarrayitem_raw = emit_op_setarrayitem_gc - - def emit_op_raw_store(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + def _emit_op_gc_load_indexed(self, op, arglocs, regalloc, fcond): + res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() + nsize = nsize_loc.value + signed = (nsize < 0) + # add the base offset + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + # + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, index_loc, imm(scale), + signed, fcond) return fcond - def _genop_getarrayitem(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - signed = op.getdescr().is_item_signed() - - # scale the offset as required - # XXX we should try to encode the scale inside the "shift" part of LDR - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - # - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_getarrayitem_gc_i = _genop_getarrayitem - emit_op_getarrayitem_gc_r = _genop_getarrayitem - emit_op_getarrayitem_gc_f = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_i = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_r = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_f = _genop_getarrayitem - emit_op_getarrayitem_raw_i = _genop_getarrayitem - emit_op_getarrayitem_raw_f = _genop_getarrayitem + emit_op_gc_load_indexed_i = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_r = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_f = _emit_op_gc_load_indexed def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale, signed=False, fcond=c.AL): + # Load a value of '1 << scale' bytes, from the memory location + # 'base_loc + ofs_loc'. Note that 'scale' is not used to scale + # the offset! 
+ # if scale.value == 3: assert res_loc.is_vfp_reg() # vldr only supports imm offsets @@ -881,51 +814,6 @@ else: assert 0 - def _genop_raw_load(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - # no base offset - assert ofs.value == 0 - signed = op.getdescr().is_item_signed() - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_raw_load_i = _genop_raw_load - emit_op_raw_load_f = _genop_raw_load - - def emit_op_strlen(self, op, arglocs, regalloc, fcond): - l0, l1, res = arglocs - if l1.is_imm(): - self.mc.LDR_ri(res.value, l0.value, l1.getint(), cond=fcond) - else: - self.mc.LDR_rr(res.value, l0.value, l1.value, cond=fcond) - return fcond - - def emit_op_strgetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.LDRB_ri(res.value, r.ip.value, basesize.value, cond=fcond) - return fcond - - def emit_op_strsetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.STRB_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - return fcond - #from ../x86/regalloc.py:928 ff. def emit_op_copystrcontent(self, op, arglocs, regalloc, fcond): assert len(arglocs) == 0 @@ -1016,35 +904,6 @@ else: raise AssertionError("bad unicode item size") - emit_op_unicodelen = emit_op_strlen - - def emit_op_unicodegetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.LDR_ri(res.value, r.ip.value, basesize.value, cond=fcond) - elif scale.value == 1: - self.mc.LDRH_ri(res.value, r.ip.value, basesize.value, cond=fcond) - else: - assert 0, itemsize.value - return fcond - - def emit_op_unicodesetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.STR_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - elif scale.value == 1: - self.mc.STRH_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - else: - assert 0, itemsize.value - - return fcond - def store_force_descr(self, op, fail_locs, frame_depth): pos = self.mc.currpos() guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -34,9 +34,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.rlib.rarithmetic import r_uint from 
rpython.jit.backend.llsupport.descr import CallDescr @@ -802,15 +799,12 @@ src_locations2, dst_locations2, vfptmploc) return [] - def prepare_op_setfield_gc(self, op, fcond): + def prepare_op_gc_store(self, op, fcond): boxes = op.getarglist() - ofs, size, sign = unpack_fielddescr(op.getdescr()) - return self._prepare_op_setfield(boxes, ofs, size) - - def _prepare_op_setfield(self, boxes, ofs, size): - a0, a1 = boxes - base_loc = self.make_sure_var_in_reg(a0, boxes) - value_loc = self.make_sure_var_in_reg(a1, boxes) + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + ofs = boxes[1].getint() + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + size = abs(boxes[3].getint()) ofs_size = default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -819,19 +813,13 @@ self.assembler.load(ofs_loc, imm(ofs)) return [value_loc, base_loc, ofs_loc, imm(size)] - prepare_op_setfield_raw = prepare_op_setfield_gc - - def prepare_op_zero_ptr_field(self, op, fcond): + def _prepare_op_gc_load(self, op, fcond): a0 = op.getarg(0) ofs = op.getarg(1).getint() - return self._prepare_op_setfield([a0, ConstInt(0)], ofs, WORD) - - def _prepare_op_getfield(self, op, fcond): - a0 = op.getarg(0) From pypy.commits at gmail.com Thu Dec 24 04:03:19 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 24 Dec 2015 01:03:19 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <567bb4d7.4a5ec20a.af75e.ffffa1d6@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r684:7d0b56d34dce Date: 2015-12-24 10:03 +0100 http://bitbucket.org/pypy/pypy.org/changeset/7d0b56d34dce/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $61615 of $105000 (58.7%) + $61634 of $105000 (58.7%)
    @@ -23,7 +23,7 @@
diff --git a/don3.html b/don3.html
--- a/don3.html
+++ b/don3.html
@@ -9,13 +9,13 @@
- $52747 of $60000 (87.9%)
+ $52771 of $60000 (88.0%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Thu Dec 24 05:15:31 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 24 Dec 2015 02:15:31 -0800 (PST) Subject: [pypy-commit] cffi default: Clarify a little bit 'cffi_allocator_t'. Motivation: obscure gcc bug Message-ID: <567bc5c3.8a75c20a.13c37.ffffb6e5@mx.google.com> Author: Armin Rigo Branch: Changeset: r2497:f52d1e25624c Date: 2015-12-24 11:14 +0100 http://bitbucket.org/cffi/cffi/changeset/f52d1e25624c/ Log: Clarify a little bit 'cffi_allocator_t'. Motivation: obscure gcc bug (issue #240) diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -279,8 +279,11 @@ # include "wchar_helper.h" #endif -typedef PyObject *const cffi_allocator_t[3]; -static cffi_allocator_t default_allocator = { NULL, NULL, NULL }; +typedef struct _cffi_allocator_s { + PyObject *ca_alloc, *ca_free; + int ca_dont_clear; +} cffi_allocator_t; +static const cffi_allocator_t default_allocator = { NULL, NULL, 0 }; static PyObject *FFIError; static PyObject *unique_cache; @@ -3019,21 +3022,18 @@ static CDataObject *allocate_with_allocator(Py_ssize_t basesize, Py_ssize_t datasize, CTypeDescrObject *ct, - cffi_allocator_t allocator) + const cffi_allocator_t *allocator) { CDataObject *cd; - PyObject *my_alloc = allocator[0]; - PyObject *my_free = allocator[1]; - PyObject *dont_clear_after_alloc = allocator[2]; - - if (my_alloc == NULL) { /* alloc */ + + if (allocator->ca_alloc == NULL) { cd = allocate_owning_object(basesize + datasize, ct); if (cd == NULL) return NULL; cd->c_data = ((char *)cd) + basesize; } else { - PyObject *res = PyObject_CallFunction(my_alloc, "n", datasize); + PyObject *res = PyObject_CallFunction(allocator->ca_alloc, "n", datasize); if (res == NULL) return NULL; @@ -3058,16 +3058,16 @@ return NULL; } - cd = allocate_gcp_object(cd, ct, my_free); + cd = allocate_gcp_object(cd, ct, allocator->ca_free); Py_DECREF(res); } - if (dont_clear_after_alloc == NULL) + if (!allocator->ca_dont_clear) memset(cd->c_data, 0, datasize); return cd; } static PyObject *direct_newp(CTypeDescrObject *ct, PyObject *init, - cffi_allocator_t allocator) + const cffi_allocator_t *allocator) { CTypeDescrObject *ctitem; CDataObject *cd; @@ -3172,7 +3172,7 @@ PyObject *init = Py_None; if (!PyArg_ParseTuple(args, "O!|O:newp", &CTypeDescr_Type, &ct, &init)) return NULL; - return direct_newp(ct, init, default_allocator); + return direct_newp(ct, init, &default_allocator); } static int diff --git a/c/ffi_obj.c b/c/ffi_obj.c --- a/c/ffi_obj.c +++ b/c/ffi_obj.c @@ -335,7 +335,7 @@ "pointer to the memory somewhere else, e.g. into another structure."); static PyObject *_ffi_new(FFIObject *self, PyObject *args, PyObject *kwds, - cffi_allocator_t allocator) + const cffi_allocator_t *allocator) { CTypeDescrObject *ct; PyObject *arg, *init = Py_None; @@ -353,15 +353,22 @@ static PyObject *ffi_new(FFIObject *self, PyObject *args, PyObject *kwds) { - return _ffi_new(self, args, kwds, default_allocator); + return _ffi_new(self, args, kwds, &default_allocator); } static PyObject *_ffi_new_with_allocator(PyObject *allocator, PyObject *args, PyObject *kwds) { + cffi_allocator_t alloc1; + PyObject *my_alloc, *my_free; + my_alloc = PyTuple_GET_ITEM(allocator, 1); + my_free = PyTuple_GET_ITEM(allocator, 2); + alloc1.ca_alloc = (my_alloc == Py_None ? NULL : my_alloc); + alloc1.ca_free = (my_free == Py_None ? 
NULL : my_free); + alloc1.ca_dont_clear = (PyTuple_GET_ITEM(allocator, 3) == Py_False); + return _ffi_new((FFIObject *)PyTuple_GET_ITEM(allocator, 0), - args, kwds, - &PyTuple_GET_ITEM(allocator, 1)); + args, kwds, &alloc1); } PyDoc_STRVAR(ffi_new_allocator_doc, @@ -396,27 +403,14 @@ return NULL; } - allocator = PyTuple_New(4); + allocator = PyTuple_Pack(4, + (PyObject *)self, + my_alloc, + my_free, + PyBool_FromLong(should_clear_after_alloc)); if (allocator == NULL) return NULL; - Py_INCREF(self); - PyTuple_SET_ITEM(allocator, 0, (PyObject *)self); - - if (my_alloc != Py_None) { - Py_INCREF(my_alloc); - PyTuple_SET_ITEM(allocator, 1, my_alloc); - } - if (my_free != Py_None) { - Py_INCREF(my_free); - PyTuple_SET_ITEM(allocator, 2, my_free); - } - if (!should_clear_after_alloc) { - PyObject *my_true = Py_True; - Py_INCREF(my_true); - PyTuple_SET_ITEM(allocator, 3, my_true); /* dont_clear_after_alloc */ - } - { static PyMethodDef md = {"allocator", (PyCFunction)_ffi_new_with_allocator, From pypy.commits at gmail.com Thu Dec 24 09:12:10 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 24 Dec 2015 06:12:10 -0800 (PST) Subject: [pypy-commit] pypy default: update version number (thanks vlegoll) Message-ID: <567bfd3a.83e01c0a.604e0.ffff958f@mx.google.com> Author: Armin Rigo Branch: Changeset: r81441:3946dcea6531 Date: 2015-12-24 15:10 +0100 http://bitbucket.org/pypy/pypy/changeset/3946dcea6531/ Log: update version number (thanks vlegoll) diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.1 +Version: 1.4.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.1" -__version_info__ = (1, 4, 1) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ From pypy.commits at gmail.com Thu Dec 24 10:02:59 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 24 Dec 2015 07:02:59 -0800 (PST) Subject: [pypy-commit] pypy default: Caching the file descriptor in rurandom is a mess. CPython has a 99% Message-ID: <567c0923.6adec20a.ad5ea.1f6d@mx.google.com> Author: Armin Rigo Branch: Changeset: r81442:c6be1b27fa1d Date: 2015-12-24 16:02 +0100 http://bitbucket.org/pypy/pypy/changeset/c6be1b27fa1d/ Log: Caching the file descriptor in rurandom is a mess. CPython has a 99% solution and hopes for the remaining 1% not to occur. For now, we just don't cache the file descriptor (any more... 6810f401d08e). diff --git a/rpython/rlib/rurandom.py b/rpython/rlib/rurandom.py --- a/rpython/rlib/rurandom.py +++ b/rpython/rlib/rurandom.py @@ -86,27 +86,29 @@ else: # Posix implementation def init_urandom(): """NOT_RPYTHON - Return an array of one int, initialized to 0. - It is filled automatically the first time urandom() is called. """ - return lltype.malloc(rffi.CArray(lltype.Signed), 1, - immortal=True, zero=True) + return None def urandom(context, n): "Read n bytes from /dev/urandom." 
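# Editorial sketch of the Python-level entry point whose C internals are
# reorganized in the cffi changeset above: ffi.new_allocator() returns a
# callable that behaves like ffi.new() but uses the given allocation pair.
# 'my_malloc' and 'my_free' are hypothetical callables that return/accept a
# 'void *' cdata; they are not defined by cffi itself.
def make_raw_allocator(ffi, my_malloc, my_free):
    return ffi.new_allocator(alloc=my_malloc, free=my_free,
                             should_clear_after_alloc=False)

# usage sketch:
#     p = make_raw_allocator(ffi, my_malloc, my_free)("int[10]")
# p behaves like ffi.new("int[10]"), except that the memory came from
# my_malloc and is released through my_free when p goes away.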
result = '' if n == 0: return result - if not context[0]: - context[0] = os.open("/dev/urandom", os.O_RDONLY, 0777) - while n > 0: - try: - data = os.read(context[0], n) - except OSError, e: - if e.errno != errno.EINTR: - raise - data = '' - result += data - n -= len(data) + # XXX should somehow cache the file descriptor. It's a mess. + # CPython has a 99% solution and hopes for the remaining 1% + # not to occur. For now, we just don't cache the file + # descriptor (any more... 6810f401d08e). + fd = os.open("/dev/urandom", os.O_RDONLY, 0777) + try: + while n > 0: + try: + data = os.read(fd, n) + except OSError, e: + if e.errno != errno.EINTR: + raise + data = '' + result += data + n -= len(data) + finally: + os.close(fd) return result - From pypy.commits at gmail.com Thu Dec 24 10:27:49 2015 From: pypy.commits at gmail.com (mattip) Date: Thu, 24 Dec 2015 07:27:49 -0800 (PST) Subject: [pypy-commit] pypy default: fix translation Message-ID: <567c0ef5.2475c20a.d619c.01a7@mx.google.com> Author: mattip Branch: Changeset: r81443:29e5f94db9b6 Date: 2015-12-24 17:24 +0200 http://bitbucket.org/pypy/pypy/changeset/29e5f94db9b6/ Log: fix translation diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,7 +4,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, + CANNOT_FAIL) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, @@ -386,7 +387,7 @@ return @cpython_api([PyObject, PyObject], PyObject, - error=lltype.nullptr(rffi.VOIDP.TO), external=True) + error=CANNOT_FAIL, external=True) @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) From pypy.commits at gmail.com Thu Dec 24 10:27:51 2015 From: pypy.commits at gmail.com (mattip) Date: Thu, 24 Dec 2015 07:27:51 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: merge default into branch Message-ID: <567c0ef7.c4b1c20a.b3ce9.31f3@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r81444:3baca7226714 Date: 2015-12-24 17:26 +0200 http://bitbucket.org/pypy/pypy/changeset/3baca7226714/ Log: merge default into branch diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.1 +Version: 1.4.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.1" -__version_info__ = (1, 4, 1) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,7 +4,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, + CANNOT_FAIL) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, @@ -386,7 +387,7 @@ return @cpython_api([PyObject, PyObject], PyObject, - error=lltype.nullptr(rffi.VOIDP.TO), external=True) + error=CANNOT_FAIL, external=True) @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) diff --git a/rpython/rlib/rurandom.py b/rpython/rlib/rurandom.py --- a/rpython/rlib/rurandom.py +++ b/rpython/rlib/rurandom.py @@ -86,27 +86,29 @@ else: # Posix implementation def init_urandom(): """NOT_RPYTHON - Return an array of one int, initialized to 0. - It is filled automatically the first time urandom() is called. """ - return lltype.malloc(rffi.CArray(lltype.Signed), 1, - immortal=True, zero=True) + return None def urandom(context, n): "Read n bytes from /dev/urandom." result = '' if n == 0: return result - if not context[0]: - context[0] = os.open("/dev/urandom", os.O_RDONLY, 0777) - while n > 0: - try: - data = os.read(context[0], n) - except OSError, e: - if e.errno != errno.EINTR: - raise - data = '' - result += data - n -= len(data) + # XXX should somehow cache the file descriptor. It's a mess. + # CPython has a 99% solution and hopes for the remaining 1% + # not to occur. For now, we just don't cache the file + # descriptor (any more... 6810f401d08e). 
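# Editorial sketch of the loop installed just below, pulled out as a plain
# helper: open the device, read until n bytes have been collected while
# treating EINTR as "got nothing, try again", and always close the descriptor
# instead of caching it.
import errno, os

def read_urandom_bytes(n, path="/dev/urandom"):
    fd = os.open(path, os.O_RDONLY, 0777)
    try:
        result = ''
        while n > 0:
            try:
                data = os.read(fd, n)
            except OSError as e:
                if e.errno != errno.EINTR:
                    raise
                data = ''
            result += data
            n -= len(data)
        return result
    finally:
        os.close(fd)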
+ fd = os.open("/dev/urandom", os.O_RDONLY, 0777) + try: + while n > 0: + try: + data = os.read(fd, n) + except OSError, e: + if e.errno != errno.EINTR: + raise + data = '' + result += data + n -= len(data) + finally: + os.close(fd) return result - From pypy.commits at gmail.com Thu Dec 24 16:48:20 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 24 Dec 2015 13:48:20 -0800 (PST) Subject: [pypy-commit] pypy default: clarify the fact that gc_store does *not* uses a negative size Message-ID: <567c6824.878e1c0a.dcf28.2db2@mx.google.com> Author: Armin Rigo Branch: Changeset: r81446:04af6667c794 Date: 2015-12-24 22:46 +0100 http://bitbucket.org/pypy/pypy/changeset/04af6667c794/ Log: clarify the fact that gc_store does *not* uses a negative size diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -804,7 +804,7 @@ base_loc = self.make_sure_var_in_reg(boxes[0], boxes) ofs = boxes[1].getint() value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - size = abs(boxes[3].getint()) + size = boxes[3].getint() ofs_size = default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -849,7 +849,7 @@ index_loc = self.make_sure_var_in_reg(boxes[1], boxes) assert boxes[3].getint() == 1 # scale ofs = boxes[4].getint() - size = abs(boxes[5].getint()) + size = boxes[5].getint() assert check_imm_arg(ofs) return [value_loc, base_loc, index_loc, imm(size), imm(ofs)] diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1039,7 +1039,8 @@ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) size_box = op.getarg(3) assert isinstance(size_box, ConstInt) - size = abs(size_box.value) + size = size_box.value + assert size >= 1 if size == 1: need_lower_byte = True else: @@ -1061,7 +1062,8 @@ assert isinstance(size_box, ConstInt) factor = scale_box.value offset = offset_box.value - size = abs(size_box.value) + size = size_box.value + assert size >= 1 if size == 1: need_lower_byte = True else: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1204,8 +1204,12 @@ '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- # same paramters as GC_LOAD, but one additional for the value to store - # note that the itemsize is not signed! 
+ # note that the itemsize is not signed (always > 0) # (gcptr, index, value, [scale, base_offset,] itemsize) + # invariants for GC_STORE: index is constant, but can be large + # invariants for GC_STORE_INDEXED: index is a non-constant box; + # scale is a constant; + # base_offset is a small constant 'GC_STORE/4d/n', 'GC_STORE_INDEXED/6d/n', From pypy.commits at gmail.com Thu Dec 24 16:48:18 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 24 Dec 2015 13:48:18 -0800 (PST) Subject: [pypy-commit] pypy default: clean-up: reduce the frequency of 'gc_(load, store)_indexed', Message-ID: <567c6822.8673c20a.386b4.ffffa178@mx.google.com> Author: Armin Rigo Branch: Changeset: r81445:39e277d9a468 Date: 2015-12-24 22:37 +0100 http://bitbucket.org/pypy/pypy/changeset/39e277d9a468/ Log: clean-up: reduce the frequency of 'gc_(load,store)_indexed', which right now is used even for get/setfield, which makes little sense diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -126,11 +126,11 @@ def emit_gc_store_or_indexed(self, op, ptr_box, index_box, value_box, itemsize, factor, offset): factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(index_box, + self._emit_mul_if_factor_offset_not_supported(index_box, factor, offset) # - if factor == 1 and offset == 0: - args = [ptr_box, index_box, value_box, ConstInt(itemsize)] + if index_box is None: + args = [ptr_box, ConstInt(offset), value_box, ConstInt(itemsize)] newload = ResOperation(rop.GC_STORE, args) else: args = [ptr_box, index_box, value_box, ConstInt(factor), @@ -153,18 +153,15 @@ index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) - def _emit_mul_add_if_factor_offset_not_supported(self, index_box, factor, offset): - orig_factor = factor - # factor - must_manually_load_const = False # offset != 0 and not self.cpu.load_constant_offset - if factor != 1 and (factor not in self.cpu.load_supported_factors or \ - (not index_box.is_constant() and must_manually_load_const)): - # enter here if the factor is supported by the cpu - # OR the index is not constant and a new resop must be emitted - # to add the offset - if isinstance(index_box, ConstInt): - index_box = ConstInt(index_box.value * factor) - else: + def _emit_mul_if_factor_offset_not_supported(self, index_box, + factor, offset): + # Returns (factor, offset, index_box) where index_box is either + # a non-constant BoxInt or None. 
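# Editorial sketch of the folding described in the comment above: a constant
# index is folded straight into the byte offset (factor becomes 1 and no index
# box is left), so the operation can be emitted as a plain GC_LOAD/GC_STORE;
# only a non-constant index keeps the *_INDEXED form.  The sign convention is
# unchanged: GC_LOAD still encodes "sign-extend" as a negative itemsize, while
# GC_STORE sizes stay positive.
def fold_constant_index(index, factor, offset):
    return 1, index * factor + offset, None      # (factor, offset, index_box)

assert fold_constant_index(5, 8, 16) == (1, 56, None)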
+ if isinstance(index_box, ConstInt): + return 1, index_box.value * factor + offset, None + else: + if factor != 1 and factor not in self.cpu.load_supported_factors: + # the factor is supported by the cpu # x & (x - 1) == 0 is a quick test for power of 2 assert factor > 0 if (factor & (factor - 1)) == 0: @@ -174,20 +171,13 @@ index_box = ResOperation(rop.INT_MUL, [index_box, ConstInt(factor)]) self.emit_op(index_box) - factor = 1 - # adjust the constant offset - #if must_manually_load_const: - # if isinstance(index_box, ConstInt): - # index_box = ConstInt(index_box.value + offset) - # else: - # index_box = ResOperation(rop.INT_ADD, [index_box, ConstInt(offset)]) - # self.emit_op(index_box) - # offset = 0 - return factor, offset, index_box + factor = 1 + return factor, offset, index_box - def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, factor, offset, sign, type='i'): + def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, + factor, offset, sign, type='i'): factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(index_box, + self._emit_mul_if_factor_offset_not_supported(index_box, factor, offset) # if sign: @@ -197,8 +187,8 @@ optype = type if op is not None: optype = op.type - if factor == 1 and offset == 0: - args = [ptr_box, index_box, ConstInt(itemsize)] + if index_box is None: + args = [ptr_box, ConstInt(offset), ConstInt(itemsize)] newload = ResOperation(OpHelpers.get_gc_load(optype), args) else: args = [ptr_box, index_box, ConstInt(factor), @@ -547,9 +537,8 @@ ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_depth) if sign: size = -size - args = [ConstInt(frame_info), ConstInt(0), ConstInt(1), - ConstInt(ofs), ConstInt(size)] - size = ResOperation(rop.GC_LOAD_INDEXED_I, args) + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = ResOperation(rop.NEW_ARRAY, [size], descr=descrs.arraydescr) @@ -560,9 +549,8 @@ ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_size) if sign: size = -size - args = [ConstInt(frame_info), ConstInt(0), ConstInt(1), - ConstInt(ofs), ConstInt(size)] - size = ResOperation(rop.GC_LOAD_INDEXED_I, args) + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = self.gen_malloc_nursery_varsize_frame(size) self.gen_initialize_tid(frame, descrs.arraydescr.tid) @@ -612,15 +600,12 @@ descr = self.cpu.getarraydescr_for_frame(arg.type) assert self.cpu.JITFRAME_FIXED_SIZE & 1 == 0 _, itemsize, _ = self.cpu.unpack_arraydescr_size(descr) - index = index_list[i] // itemsize # index is in bytes - # emit GC_LOAD_INDEXED - itemsize, basesize, _ = unpack_arraydescr(descr) - factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(ConstInt(index), - itemsize, basesize) - args = [frame, index_box, arg, ConstInt(factor), - ConstInt(offset), ConstInt(itemsize)] - self.emit_op(ResOperation(rop.GC_STORE_INDEXED, args)) + array_offset = index_list[i] # index, already measured in bytes + # emit GC_STORE + _, basesize, _ = unpack_arraydescr(descr) + offset = basesize + array_offset + args = [frame, ConstInt(offset), arg, ConstInt(itemsize)] + self.emit_op(ResOperation(rop.GC_STORE, args)) descr = op.getdescr() assert isinstance(descr, JitCellToken) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ 
b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -30,13 +30,26 @@ class RewriteTests(object): def check_rewrite(self, frm_operations, to_operations, **namespace): - def trans_getarray_to_load(descr): - size = descr.basesize - if descr.is_item_signed(): - size = -size - return ','.join([str(n) for n in [descr.itemsize, - descr.basesize, - size]]) + def setfield(baseptr, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + return 'gc_store(%s, %d, %s, %d)' % (baseptr, descr.offset, + newvalue, descr.field_size) + def setarrayitem(baseptr, index, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(index, (str, int)) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + if isinstance(index, int): + offset = descr.basesize + index * descr.itemsize + return 'gc_store(%s, %d, %s, %d)' % (baseptr, offset, + newvalue, descr.itemsize) + else: + return 'gc_store_indexed(%s, %s, %s, %d, %d, %s)' % ( + baseptr, index, newvalue, + descr.itemsize, descr.basesize, descr.itemsize) + # WORD = globals()['WORD'] S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) @@ -376,7 +389,7 @@ gc_store(p1, 0, 5678, 8) p2 = nursery_ptr_increment(p1, %(tdescr.size)d) gc_store(p2, 0, 1234, 8) - gc_store(p1, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) + %(setfield('p1', 0, tdescr.gc_fielddescrs[0]))s jump() """) @@ -485,7 +498,7 @@ """, """ [i0] p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr) - gc_store_indexed(p0, 0, i0, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) gc_store(p0, 0, 0, %(strlendescr.field_size)s) jump(i0) """) @@ -611,19 +624,19 @@ %(strdescr.basesize + 16 * strdescr.itemsize + \ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s) - gc_store_indexed(p0, 0, 14, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s) gc_store(p0, 0, 0, %(strhashdescr.field_size)s) p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s) - gc_store_indexed(p1, 0, 10, 1, %(unicodelendescr.offset)s, %(unicodelendescr.field_size)s) + gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s) gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s) p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) - gc_store_indexed(p2, 0, i2, 1, %(unicodelendescr.offset)s, %(unicodelendescr.field_size)s) + gc_store(p2, %(unicodelendescr.offset)s, i2, %(unicodelendescr.field_size)s) gc_store(p2, 0, 0, %(unicodehashdescr.field_size)s) p3 = call_malloc_nursery_varsize(1, 1, i2, \ descr=strdescr) - gc_store_indexed(p3, 0, i2, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p3, %(strlendescr.offset)s, i2, %(strlendescr.field_size)s) gc_store(p3, 0, 0, %(strhashdescr.field_size)s) jump() """) @@ -636,7 +649,7 @@ """, """ [p1, p2] cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, 0, p2, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p1, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump() """) @@ -650,7 +663,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', 
cdescr))s jump() """) @@ -671,7 +684,7 @@ zero_array(p1, 0, 129, descr=cdescr) call_n(123456) cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -693,7 +706,7 @@ zero_array(p1, 0, 130, descr=cdescr) call_n(123456) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -705,7 +718,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -725,7 +738,7 @@ zero_array(p1, 0, 5, descr=cdescr) label(p1, i2, p3) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -743,12 +756,12 @@ size = interiorzdescr.arraydescr.itemsize self.check_rewrite(""" [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + setinteriorfield_gc(p1, 7, p2, descr=interiorzdescr) jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb_array(p1, 0, descr=wbdescr) - gc_store_indexed(p1, 0, p2, %(scale)s, %(offset)s, %(size)s) + cond_call_gc_wb_array(p1, 7, descr=wbdescr) + gc_store(p1, %(offset + 7 * scale)s, p2, %(size)s) jump(p1, p2) """, interiorzdescr=interiorzdescr, scale=scale, offset=offset, size=size) @@ -763,7 +776,7 @@ [p1] p0 = call_malloc_nursery(%(tdescr.size)d) gc_store(p0, 0, 5678, %(tiddescr.field_size)s) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -781,7 +794,7 @@ p1 = nursery_ptr_increment(p0, %(tdescr.size)d) gc_store(p1, 0, 1234, %(tiddescr.field_size)s) # <<>> - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -798,7 +811,7 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - gc_store_indexed(p0, i2, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 'i2', 'p1', cdescr))s jump() """) @@ -816,8 +829,8 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 2, 3, descr=cdescr) - gc_store_indexed(p0, 1, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 0, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p1', cdescr))s + %(setarrayitem('p0', 0, 'p2', cdescr))s jump() """) @@ -835,8 +848,8 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 3, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 4, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s jump() """) @@ -855,9 +868,9 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 2, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -878,11 +891,11 @@ 
gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 5, 0, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 4, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 2, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s + %(setarrayitem('p0', 0, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -901,10 +914,10 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 0, 'p1', cdescr))s call_n(321321) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -923,10 +936,10 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 0, 'p1', cdescr))s label(p0, p2) cond_call_gc_wb_array(p0, 1, descr=wbdescr) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -955,7 +968,7 @@ gc_store(p0, 0, i3, %(blendescr.field_size)s) zero_array(p0, 0, i3, descr=bdescr) cond_call_gc_wb_array(p0, 0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(bdescr.basesize)s, 1) + %(setarrayitem('p0', 0, 'p1', bdescr))s jump() """) @@ -991,10 +1004,10 @@ gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) - gc_store_indexed(p1, 0, i0, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p1, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) gc_store(p1, 0, 0, %(strhashdescr.field_size)s) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -1012,7 +1025,7 @@ gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) label(p0, p1) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -1025,8 +1038,8 @@ """, """ [p0, p1, p2] cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) - gc_store_indexed(p0, 0, p2, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump(p1, p2, p0) """) @@ -1036,20 +1049,20 @@ i2 = call_assembler_i(i0, f0, descr=casmdescr) """, """ [i0, f0] - i1 = gc_load_indexed_i(ConstClass(frame_info), 0, 1, 1, %(jfi_frame_size.field_size)s) + i1 = gc_load_i(ConstClass(frame_info), %(jfi_frame_size.offset)s, %(jfi_frame_size.field_size)s) p1 = call_malloc_nursery_varsize_frame(i1) gc_store(p1, 0, 0, %(tiddescr.field_size)s) - i2 = gc_load_indexed_i(ConstClass(frame_info), 0, 1, 1, %(jfi_frame_depth.field_size)s) - gc_store_indexed(p1, 0, 0, 1, 1, 
%(jf_extra_stack_depth.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_savedata.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_force_descr.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_descr.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_guard_exc.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_forward.field_size)s) + i2 = gc_load_i(ConstClass(frame_info), %(jfi_frame_depth.offset)s, %(jfi_frame_depth.field_size)s) + %(setfield('p1', 0, jf_extra_stack_depth))s + %(setfield('p1', 'NULL', jf_savedata))s + %(setfield('p1', 'NULL', jf_force_descr))s + %(setfield('p1', 'NULL', jf_descr))s + %(setfield('p1', 'NULL', jf_guard_exc))s + %(setfield('p1', 'NULL', jf_forward))s gc_store(p1, 0, i2, %(framelendescr.field_size)s) - gc_store_indexed(p1, 0, ConstClass(frame_info), 1, 1, %(jf_frame_info.field_size)s) - gc_store_indexed(p1, 0, i0, 8, 3, 8) - gc_store_indexed(p1, 1, f0, 8, 5, 8) + %(setfield('p1', 'ConstClass(frame_info)', jf_frame_info))s + gc_store(p1, 3, i0, 8) + gc_store(p1, 13, f0, 8) i3 = call_assembler_i(p1, descr=casmdescr) """) @@ -1101,7 +1114,7 @@ p0 = call_malloc_nursery(%(tdescr.size)d) gc_store(p0, 0, 5678, %(tiddescr.field_size)s) gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) - p1 = gc_load_indexed_r(p0, 0, 1, %(tzdescr.field_size)s, %(tzdescr.field_size)s) + p1 = gc_load_r(p0, %(tzdescr.offset)s, %(tzdescr.field_size)s) jump(p1) """) @@ -1155,23 +1168,19 @@ # 'i5 = int_add(i1,%(raw_sfdescr.basesize)s);' # 'gc_store(p0,i5,i2,%(raw_sfdescr.itemsize)s)'], [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' - 'i3 = gc_load_indexed_f(p0,0,1,%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' - 'i3 = gc_load_indexed_f(p0,0,1,%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = setfield_raw(p0,i1,descr=ydescr)' '->' - 'i3 = gc_store_indexed(p0,0,i1,1,' - '%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = setfield_gc(p0,p0,descr=zdescr)' '->' + 'i3 = gc_load_f(p0,%(ydescr.offset)s,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_raw(p0,i1,descr=ydescr)' '->' + 'gc_store(p0,%(ydescr.offset)s,i1,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_gc(p0,p0,descr=zdescr)' '->' 'cond_call_gc_wb(p0, descr=wbdescr);' - 'i3 = gc_store_indexed(p0,0,p0,1,' - '%(zdescr.offset)s,%(zdescr.field_size)s)'], + 'gc_store(p0,%(zdescr.offset)s,p0,%(zdescr.field_size)s)'], [False, (1,), 'i3 = arraylen_gc(p0, descr=adescr)' '->' 'i3 = gc_load_i(p0,0,%(adescr.itemsize)s)'], #[False, (1,), 'i3 = strlen(p0)' '->' # 'i3 = gc_load_i(p0,' # '%(strlendescr.offset)s,%(strlendescr.field_size)s)'], [True, (1,), 'i3 = strlen(p0)' '->' - 'i3 = gc_load_indexed_i(p0,0,1,' + 'i3 = gc_load_i(p0,' '%(strlendescr.offset)s,' '%(strlendescr.field_size)s)'], #[False, (1,), 'i3 = unicodelen(p0)' '->' @@ -1179,7 +1188,7 @@ # '%(unicodelendescr.offset)s,' # '%(unicodelendescr.field_size)s)'], [True, (1,), 'i3 = unicodelen(p0)' '->' - 'i3 = gc_load_indexed_i(p0,0,1,' + 'i3 = gc_load_i(p0,' '%(unicodelendescr.offset)s,' '%(unicodelendescr.field_size)s)'], diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -68,8 +68,8 @@ return box.value def repr_rpython(box, typechars): - return '%s/%s%d' % (box._get_hash_(), typechars, - compute_unique_id(box)) + return '%s/%s' % (box._get_hash_(), typechars, + ) 
#compute_unique_id(box)) class XxxAbstractValue(object): From pypy.commits at gmail.com Sun Dec 27 12:41:48 2015 From: pypy.commits at gmail.com (arigo) Date: Sun, 27 Dec 2015 09:41:48 -0800 (PST) Subject: [pypy-commit] pypy default: on a fresh Ubuntu, I get a TypeError sometimes because run_subprocess() Message-ID: <568022dc.4e0e1c0a.6a0cd.ffffbc15@mx.google.com> Author: Armin Rigo Branch: Changeset: r81447:d93c80771e69 Date: 2015-12-27 18:40 +0100 http://bitbucket.org/pypy/pypy/changeset/d93c80771e69/ Log: on a fresh Ubuntu, I get a TypeError sometimes because run_subprocess() receives a list containing a few unicodes instead of just strings... diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -9,6 +9,8 @@ from subprocess import PIPE, Popen def run_subprocess(executable, args, env=None, cwd=None): + if isinstance(args, list): + args = [a.encode('latin1') for a in args] return _run(executable, args, env, cwd) shell_default = False From pypy.commits at gmail.com Mon Dec 28 05:54:04 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 28 Dec 2015 02:54:04 -0800 (PST) Subject: [pypy-commit] cffi default: Issue #241: work around another bug(?) of libffi Message-ID: <568114cc.a85fc20a.99e77.513d@mx.google.com> Author: Armin Rigo Branch: Changeset: r2498:e09ccb6b8d89 Date: 2015-12-28 11:53 +0100 http://bitbucket.org/cffi/cffi/changeset/e09ccb6b8d89/ Log: Issue #241: work around another bug(?) of libffi diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4648,7 +4648,9 @@ if (cif_descr != NULL) { /* exchange data size */ - cif_descr->exchange_size = exchange_offset; + /* we also align it to the next multiple of 8, in an attempt to + work around bugs(?) of libffi like #241 */ + cif_descr->exchange_size = ALIGN_ARG(exchange_offset); } return 0; } From pypy.commits at gmail.com Mon Dec 28 06:00:16 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 28 Dec 2015 03:00:16 -0800 (PST) Subject: [pypy-commit] pypy default: copy cffi/e09ccb6b8d89 Message-ID: <56811640.c4efc20a.c40ff.5c1b@mx.google.com> Author: Armin Rigo Branch: Changeset: r81448:cdd09c7fd673 Date: 2015-12-28 11:57 +0100 http://bitbucket.org/pypy/pypy/changeset/cdd09c7fd673/ Log: copy cffi/e09ccb6b8d89 diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -423,7 +423,9 @@ exchange_offset += rffi.getintfield(self.atypes[i], 'c_size') # store the exchange data size - cif_descr.exchange_size = exchange_offset + # we also align it to the next multiple of 8, in an attempt to + # work around bugs(?) 
of libffi (see cffi issue #241) + cif_descr.exchange_size = self.align_arg(exchange_offset) def fb_extra_fields(self, cif_descr): cif_descr.abi = self.fabi From pypy.commits at gmail.com Mon Dec 28 06:41:28 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 28 Dec 2015 03:41:28 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: merged default, Message-ID: <56811fe8.863f1c0a.cca08.ffff80a0@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r81450:5988809aee50 Date: 2015-12-28 12:40 +0100 http://bitbucket.org/pypy/pypy/changeset/5988809aee50/ Log: merged default, continue to refactor zero_array to move the scaling to rewrite diff too long, truncating to 2000 out of 12605 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -29,4 +29,4 @@ release/ !pypy/tool/release/ rpython/_cache/ -__pycache__/ +.cache/ diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.0 +Version: 1.4.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.0" -__version_info__ = (1, 4, 0) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -73,28 +73,36 @@ lzma (PyPy3 only) liblzma -sqlite3 - libsqlite3 - -curses - libncurses + cffi dependencies from above - pyexpat libexpat1 _ssl libssl +Make sure to have these libraries (with development headers) installed +before building PyPy, otherwise the resulting binary will not contain +these modules. Furthermore, the following libraries should be present +after building PyPy, otherwise the corresponding CFFI modules are not +built (you can run or re-run `pypy/tool/release/package.py` to retry +to build them; you don't need to re-translate the whole PyPy): + +sqlite3 + libsqlite3 + +curses + libncurses + gdbm libgdbm-dev -Make sure to have these libraries (with development headers) installed before -building PyPy, otherwise the resulting binary will not contain these modules. +tk + tk-dev On Debian, this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev + libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \ + tk-dev For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. @@ -102,6 +110,7 @@ yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel + (XXX plus the Febora version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. 
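For the exchange_size change above (cffi changeset e09ccb6b8d89 and its PyPy copy cdd09c7fd673), neither ALIGN_ARG nor align_arg is defined in the excerpt; the commit messages only say the size is rounded up to the next multiple of 8 to work around libffi issue #241. The conventional bit trick for that rounding, shown as a sketch rather than the actual cffi code:

def align8(nbytes):
    # round nbytes up to the next multiple of 8 (valid because 8 is a power of two)
    return (nbytes + 7) & ~7

assert [align8(n) for n in (0, 1, 7, 8, 9, 16)] == [0, 8, 8, 8, 16, 16]
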
@@ -110,6 +119,7 @@ zypper install gcc make python-devel pkg-config \ zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ libexpat-devel libffi-devel python-curses + (XXX plus the SLES11 version of libgdbm-dev and tk-dev) For the optional lzma module on PyPy3 you will also need ``xz-devel``. @@ -125,11 +135,13 @@ Translate with JIT:: - pypy rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=jit Translate without JIT:: - pypy rpython/bin/rpython --opt=2 pypy/goal/targetpypystandalone.py + cd pypy/goal + pypy ../../rpython/bin/rpython --opt=2 (You can use ``python`` instead of ``pypy`` here, which will take longer but works too.) diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -130,8 +130,13 @@ More complete example --------------------- -.. note:: This example depends on pypy_execute_source_ptr which is not available - in PyPy <= 2.2.1. +.. note:: Note that we do not make use of ``extern "Python"``, the new + way to do callbacks in CFFI 1.4: this is because these examples use + the ABI mode, not the API mode, and with the ABI mode you still have + to use ``ffi.callback()``. It is work in progress to integrate + ``extern "Python"`` with the idea of embedding (and it is expected + to ultimately lead to a better way to do embedding than the one + described here, and that would work equally well on CPython and PyPy). Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -83,29 +83,27 @@ **pypy-stm requires 64-bit Linux for now.** -Development is done in the branch `stmgc-c7`_. If you are only -interested in trying it out, you can download a Ubuntu binary here__ -(``pypy-stm-2.*.tar.bz2``, for Ubuntu 12.04-14.04). The current version -supports four "segments", which means that it will run up to four -threads in parallel. (Development recently switched to `stmgc-c8`_, -but that is not ready for trying out yet.) +Development is done in the branch `stmgc-c8`_. If you are only +interested in trying it out, please pester us until we upload a recent +prebuilt binary. The current version supports four "segments", which +means that it will run up to four threads in parallel. To build a version from sources, you first need to compile a custom -version of clang(!); we recommend downloading `llvm and clang like -described here`__, but at revision 201645 (use ``svn co -r 201645 `` -for all checkouts). Then apply all the patches in `this directory`__: -they are fixes for a clang-only feature that hasn't been used so heavily -in the past (without the patches, you get crashes of clang). Then get -the branch `stmgc-c7`_ of PyPy and run:: +version of gcc(!). See the instructions here: +https://bitbucket.org/pypy/stmgc/src/default/gcc-seg-gs/ +(Note that these patches are being incorporated into gcc. It is likely +that future versions of gcc will not need to be patched any more.) - rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py - PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py +Then get the branch `stmgc-c8`_ of PyPy and run:: -.. 
_`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ + cd pypy/goal + ../../rpython/bin/rpython -Ojit --stm + +At the end, this will try to compile the generated C code by calling +``gcc-seg-gs``, which must be the script you installed in the +instructions above. + .. _`stmgc-c8`: https://bitbucket.org/pypy/pypy/src/stmgc-c8/ -.. __: https://bitbucket.org/pypy/pypy/downloads/ -.. __: http://clang.llvm.org/get_started.html -.. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ .. _caveats: @@ -113,6 +111,12 @@ Current status (stmgc-c7) ------------------------- +.. warning:: + + THIS PAGE IS OLD, THE REST IS ABOUT STMGC-C7 WHEREAS THE CURRENT + DEVELOPMENT WORK IS DONE ON STMGC-C8 + + * **NEW:** It seems to work fine, without crashing any more. Please `report any crash`_ you find (or other bugs). diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,6 +44,9 @@ .. branch: fix-setslice-can-resize +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + .. branch: anntype2 A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: @@ -67,3 +70,34 @@ Simplification. Backends implement too many loading instructions, only having a slightly different interface. Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the commonly known loading operations + +.. branch: more-rposix + +Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and +turn them into regular RPython functions. Most RPython-compatible `os.*` +functions are now directly accessible as `rpython.rposix.*`. + +.. branch: always-enable-gil + +Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. + +.. branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + +.. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. + +.. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + +.. branch: cpyext-slotdefs +.. branch: fix-missing-canraise +.. branch: whatsnew + +.. branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. 
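To make the fix-2211 entry concrete: "extended slicing" here means any slice written with an explicit step, which the RPython annotator rejects (the flow-space 'newslice' operation raises the AnnotatorError whose message appears in the operation.py and annotator test diffs further down). A small illustration; both functions run as plain Python, but only the first one annotates as RPython:

def plain_slice(lst, start, stop):
    return lst[start:stop]    # two-value slice: accepted by the annotator

def extended_slice(lst):
    return lst[::-1]          # step given: "Cannot use extended slicing in rpython"
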
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -289,6 +289,8 @@ for w_item in space.fixedview(obj): result_w.append(self._make_key(w_item)) w_key = space.newtuple(result_w[:]) + elif isinstance(obj, PyCode): + w_key = space.newtuple([obj, w_type, space.id(obj)]) else: w_key = space.newtuple([obj, w_type]) return w_key diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -931,6 +931,11 @@ finally: space.call_function(w_set_debug, space.w_True) + def test_dont_fold_equal_code_objects(self): + yield self.st, "f=lambda:1;g=lambda:1.0;x=g()", 'type(x)', float + yield (self.st, "x=(lambda: (-0.0, 0.0), lambda: (0.0, -0.0))[1]()", + 'repr(x)', '(0.0, -0.0)') + class AppTestCompiler: diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload, clibffi -VERSION = "1.4.0" +VERSION = "1.4.2" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/call_python.py b/pypy/module/_cffi_backend/call_python.py --- a/pypy/module/_cffi_backend/call_python.py +++ b/pypy/module/_cffi_backend/call_python.py @@ -40,10 +40,9 @@ at least 8 bytes in size. """ from pypy.module._cffi_backend.ccallback import reveal_callback + from rpython.rlib import rgil - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -71,9 +70,7 @@ cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO) rffi.stackcounter.stacks_counter -= 1 - before = rffi.aroundstate.before - if before: - before() + rgil.release() def get_ll_cffi_call_python(): diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.4.0", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/pypy/module/_file/test/test_large_file.py b/pypy/module/_file/test/test_large_file.py --- a/pypy/module/_file/test/test_large_file.py +++ b/pypy/module/_file/test/test_large_file.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.module._file.test.test_file import getfile @@ -13,6 +13,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): from rpython.translator.c.test.test_extfunc import need_sparse_files + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_large_seek_offsets(self): diff --git 
a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -7,7 +7,9 @@ from pypy.module.exceptions.interp_exceptions import W_IOError from pypy.module._io.interp_fileio import W_FileIO from pypy.module._io.interp_textio import W_TextIOWrapper -from rpython.rtyper.module.ll_os_stat import STAT_FIELD_TYPES +from rpython.rlib.rposix_stat import STAT_FIELD_TYPES + +HAS_BLKSIZE = 'st_blksize' in STAT_FIELD_TYPES class Cache: @@ -118,7 +120,7 @@ if buffering < 0: buffering = DEFAULT_BUFFER_SIZE - if 'st_blksize' in STAT_FIELD_TYPES: + if HAS_BLKSIZE: fileno = space.c_int_w(space.call_method(w_raw, "fileno")) try: st = os.fstat(fileno) diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -251,7 +251,7 @@ from pypy.module._socket.interp_socket import addr_as_object if not hasattr(rsocket._c, 'sockaddr_ll'): py.test.skip("posix specific test") - # HACK: To get the correct interface numer of lo, which in most cases is 1, + # HACK: To get the correct interface number of lo, which in most cases is 1, # but can be anything (i.e. 39), we need to call the libc function # if_nametoindex to get the correct index import ctypes @@ -513,7 +513,7 @@ def test_getsetsockopt(self): import _socket as socket import struct - # A socket sould start with reuse == 0 + # A socket should start with reuse == 0 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) reuse = s.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) assert reuse == 0 @@ -627,6 +627,26 @@ self.foo = _socket.socket() +class AppTestNetlink: + def setup_class(cls): + if not hasattr(os, 'getpid'): + py.test.skip("AF_NETLINK needs os.getpid()") + w_ok = space.appexec([], "(): import _socket; " + + "return hasattr(_socket, 'AF_NETLINK')") + if not space.is_true(w_ok): + py.test.skip("no AF_NETLINK on this platform") + cls.space = space + + def test_connect_to_kernel_netlink_routing_socket(self): + import _socket, os + s = _socket.socket(_socket.AF_NETLINK, _socket.SOCK_DGRAM, _socket.NETLINK_ROUTE) + assert s.getsockname() == (0L, 0L) + s.bind((0, 0)) + a, b = s.getsockname() + assert a == os.getpid() + assert b == 0 + + class AppTestPacket: def setup_class(cls): if not hasattr(os, 'getuid') or os.getuid() != 0: diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -124,7 +124,7 @@ METH_COEXIST METH_STATIC METH_CLASS METH_NOARGS METH_VARARGS METH_KEYWORDS METH_O Py_TPFLAGS_HEAPTYPE Py_TPFLAGS_HAVE_CLASS -Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE +Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES """.split() for name in constant_names: setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name)) @@ -602,6 +602,7 @@ # Make the wrapper for the cases (1) and (2) def make_wrapper(space, callable, gil=None): "NOT_RPYTHON" + from rpython.rlib import rgil names = callable.api_func.argnames argtypes_enum_ui = unrolling_iterable(enumerate(zip(callable.api_func.argtypes, [name.startswith("w_") for name in names]))) @@ -617,9 +618,7 @@ # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer if gil_acquire: - after = rffi.aroundstate.after - if after: - after() + rgil.acquire() rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -692,9 
+691,7 @@ pypy_debug_catch_fatal_exception() rffi.stackcounter.stacks_counter -= 1 if gil_release: - before = rffi.aroundstate.before - if before: - before() + rgil.release() return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,14 +4,15 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, + CANNOT_FAIL) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, readbufferproc) -from pypy.module.cpyext.pyobject import from_ref +from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt @@ -65,22 +66,24 @@ func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, w_self, args_w[0]) def wrap_binaryfunc_r(space, w_self, w_args, func): func_binary = rffi.cast(binaryfunc, func) check_num_args(space, w_args, 1) args_w = space.fixedview(w_args) - - if not space.is_true(space.issubtype(space.type(args_w[0]), - space.type(w_self))): + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): return space.w_NotImplemented - + Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, args_w[0], w_self) def wrap_inquirypred(space, w_self, w_args, func): @@ -378,6 +381,17 @@ space.call_function(delattr_fn, w_self, w_name) return 0 api_func = slot_tp_setattro.api_func + elif name == 'tp_getattro': + getattr_fn = w_type.getdictvalue(space, '__getattribute__') + if getattr_fn is None: + return + + @cpython_api([PyObject, PyObject], PyObject, + error=CANNOT_FAIL, external=True) + @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) + def slot_tp_getattro(space, w_self, w_name): + return space.call_function(getattr_fn, w_self, w_name) + api_func = slot_tp_getattro.api_func else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -385,12 +385,53 @@ PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); return NULL; } + if (!args->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + if (args->ob_type->tp_getattro == + args->ob_type->tp_base->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "recursive tp_getattro"); + return NULL; + } Py_RETURN_TRUE; ''' ) 
]) assert module.test_type(type(None)) + def test_tp_getattro(self): + module = self.import_extension('foo', [ + ("test_tp_getattro", "METH_VARARGS", + ''' + PyObject *obj = PyTuple_GET_ITEM(args, 0); + PyIntObject *value = PyTuple_GET_ITEM(args, 1); + if (!obj->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + PyObject *name = PyString_FromString("attr1"); + PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); + if (attr1->ob_ival != value->ob_ival) + { + PyErr_SetString(PyExc_ValueError, + "tp_getattro returned wrong value"); + return NULL; + } + Py_DECREF(name); + Py_DECREF(attr1); + Py_RETURN_TRUE; + ''' + ) + ]) + class C: + def __init__(self): + self.attr1 = 123 + assert module.test_tp_getattro(C(), 123) + def test_nb_int(self): module = self.import_extension('foo', [ ("nb_int", "METH_O", @@ -591,45 +632,92 @@ def test_binaryfunc(self): module = self.import_extension('foo', [ - ("new_obj", "METH_NOARGS", + ("newInt", "METH_VARARGS", """ - FooObject *fooObj; + IntLikeObject *intObj; + long intval; - Foo_Type.tp_as_number = &foo_as_number; - foo_as_number.nb_add = foo_nb_add_call; - if (PyType_Ready(&Foo_Type) < 0) return NULL; - fooObj = PyObject_New(FooObject, &Foo_Type); - if (!fooObj) { + if (!PyArg_ParseTuple(args, "i", &intval)) + return NULL; + + IntLike_Type.tp_as_number = &intlike_as_number; + IntLike_Type.tp_flags |= Py_TPFLAGS_CHECKTYPES; + intlike_as_number.nb_add = intlike_nb_add; + if (PyType_Ready(&IntLike_Type) < 0) return NULL; + intObj = PyObject_New(IntLikeObject, &IntLike_Type); + if (!intObj) { return NULL; } - return (PyObject *)fooObj; + intObj->ival = intval; + return (PyObject *)intObj; + """), + ("newIntNoOp", "METH_VARARGS", + """ + IntLikeObjectNoOp *intObjNoOp; + long intval; + + if (!PyArg_ParseTuple(args, "i", &intval)) + return NULL; + + IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; + if (PyType_Ready(&IntLike_Type_NoOp) < 0) return NULL; + intObjNoOp = PyObject_New(IntLikeObjectNoOp, &IntLike_Type_NoOp); + if (!intObjNoOp) { + return NULL; + } + + intObjNoOp->ival = intval; + return (PyObject *)intObjNoOp; """)], """ typedef struct { PyObject_HEAD - } FooObject; + long ival; + } IntLikeObject; static PyObject * - foo_nb_add_call(PyObject *self, PyObject *other) + intlike_nb_add(PyObject *self, PyObject *other) { - return PyInt_FromLong(42); + long val1 = ((IntLikeObject *)(self))->ival; + if (PyInt_Check(other)) { + long val2 = PyInt_AsLong(other); + return PyInt_FromLong(val1+val2); + } + + long val2 = ((IntLikeObject *)(other))->ival; + return PyInt_FromLong(val1+val2); } - PyTypeObject Foo_Type = { + PyTypeObject IntLike_Type = { PyObject_HEAD_INIT(0) /*ob_size*/ 0, - /*tp_name*/ "Foo", - /*tp_basicsize*/ sizeof(FooObject), + /*tp_name*/ "IntLike", + /*tp_basicsize*/ sizeof(IntLikeObject), }; - static PyNumberMethods foo_as_number; + static PyNumberMethods intlike_as_number; + + typedef struct + { + PyObject_HEAD + long ival; + } IntLikeObjectNoOp; + + PyTypeObject IntLike_Type_NoOp = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "IntLikeNoOp", + /*tp_basicsize*/ sizeof(IntLikeObjectNoOp), + }; """) - a = module.new_obj() - b = module.new_obj() + a = module.newInt(1) + b = module.newInt(2) c = 3 - assert (a + b) == 42 - raises(TypeError, "b + c") + d = module.newIntNoOp(4) + assert (a + b) == 3 + assert (b + c) == 5 + assert (d + a) == 5 def test_tp_new_in_subclass_of_type(self): skip("BROKEN") diff --git a/pypy/module/cpyext/typeobject.py 
b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -582,6 +582,8 @@ pto.c_tp_free = base.c_tp_free if not pto.c_tp_setattro: pto.c_tp_setattro = base.c_tp_setattro + if not pto.c_tp_getattro: + pto.c_tp_getattro = base.c_tp_getattro finally: Py_DecRef(space, base_pyo) @@ -651,6 +653,12 @@ PyObject_GenericSetAttr.api_func.functype, PyObject_GenericSetAttr.api_func.get_wrapper(space)) + if not pto.c_tp_getattro: + from pypy.module.cpyext.object import PyObject_GenericGetAttr + pto.c_tp_getattro = llhelper( + PyObject_GenericGetAttr.api_func.functype, + PyObject_GenericGetAttr.api_func.get_wrapper(space)) + if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) w_dict = w_obj.getdict(space) diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -1,6 +1,6 @@ from __future__ import with_statement from rpython.tool.udir import udir -import os +import os, sys, py class AppTestMMap: spaceconfig = dict(usemodules=('mmap',)) @@ -8,6 +8,15 @@ def setup_class(cls): cls.w_tmpname = cls.space.wrap(str(udir.join('mmap-'))) + def setup_method(self, meth): + if getattr(meth, 'is_large', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") + def test_page_size(self): import mmap assert mmap.PAGESIZE > 0 @@ -648,6 +657,7 @@ assert m[0xFFFFFFF] == b'A' finally: m.close() + test_large_offset.is_large = True def test_large_filesize(self): import mmap @@ -665,6 +675,7 @@ assert m.size() == 0x180000000 finally: m.close() + test_large_filesize.is_large = True def test_all(self): # this is a global test, ported from test_mmap.py diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -1,5 +1,5 @@ from pypy.interpreter.mixedmodule import MixedModule -from rpython.rtyper.module.ll_os import RegisterOs +from rpython.rlib import rposix import os exec 'import %s as posix' % os.name @@ -172,7 +172,7 @@ if hasattr(os, 'chroot'): interpleveldefs['chroot'] = 'interp_posix.chroot' - for name in RegisterOs.w_star: + for name in rposix.WAIT_MACROS: if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' 
+ name diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,12 +1,11 @@ import os import sys -from rpython.rlib import rposix, objectmodel, rurandom +from rpython.rlib import rposix, rposix_stat +from rpython.rlib import objectmodel, rurandom from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.module import ll_os_stat -from rpython.rtyper.module.ll_os import RegisterOs from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.error import OperationError, wrap_oserror, wrap_oserror2 @@ -43,6 +42,8 @@ return space.str0_w(w_obj) class FileEncoder(object): + is_unicode = True + def __init__(self, space, w_obj): self.space = space self.w_obj = w_obj @@ -54,6 +55,8 @@ return self.space.unicode0_w(self.w_obj) class FileDecoder(object): + is_unicode = False + def __init__(self, space, w_obj): self.space = space self.w_obj = w_obj @@ -212,13 +215,13 @@ # ____________________________________________________________ -STAT_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STAT_FIELDS)) +STAT_FIELDS = unrolling_iterable(enumerate(rposix_stat.STAT_FIELDS)) -STATVFS_FIELDS = unrolling_iterable(enumerate(ll_os_stat.STATVFS_FIELDS)) +STATVFS_FIELDS = unrolling_iterable(enumerate(rposix_stat.STATVFS_FIELDS)) def build_stat_result(space, st): FIELDS = STAT_FIELDS # also when not translating at all - lst = [None] * ll_os_stat.N_INDEXABLE_FIELDS + lst = [None] * rposix_stat.N_INDEXABLE_FIELDS w_keywords = space.newdict() stat_float_times = space.fromcache(StatState).stat_float_times for i, (name, TYPE) in FIELDS: @@ -226,7 +229,7 @@ if name in ('st_atime', 'st_mtime', 'st_ctime'): value = int(value) # rounded to an integer for indexed access w_value = space.wrap(value) - if i < ll_os_stat.N_INDEXABLE_FIELDS: + if i < rposix_stat.N_INDEXABLE_FIELDS: lst[i] = w_value else: space.setitem(w_keywords, space.wrap(name), w_value) @@ -254,7 +257,7 @@ def build_statvfs_result(space, st): - vals_w = [None] * len(ll_os_stat.STATVFS_FIELDS) + vals_w = [None] * len(rposix_stat.STATVFS_FIELDS) for i, (name, _) in STATVFS_FIELDS: vals_w[i] = space.wrap(getattr(st, name)) w_tuple = space.newtuple(vals_w) @@ -267,7 +270,7 @@ """Perform a stat system call on the file referenced to by an open file descriptor.""" try: - st = os.fstat(fd) + st = rposix_stat.fstat(fd) except OSError, e: raise wrap_oserror(space, e) else: @@ -289,7 +292,7 @@ """ try: - st = dispatch_filename(rposix.stat)(space, w_path) + st = dispatch_filename(rposix_stat.stat)(space, w_path) except OSError, e: raise wrap_oserror2(space, e, w_path) else: @@ -298,7 +301,7 @@ def lstat(space, w_path): "Like stat(path), but do no follow symbolic links." 
try: - st = dispatch_filename(rposix.lstat)(space, w_path) + st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: raise wrap_oserror2(space, e, w_path) else: @@ -327,7 +330,7 @@ @unwrap_spec(fd=c_int) def fstatvfs(space, fd): try: - st = os.fstatvfs(fd) + st = rposix_stat.fstatvfs(fd) except OSError as e: raise wrap_oserror(space, e) else: @@ -336,7 +339,7 @@ def statvfs(space, w_path): try: - st = dispatch_filename(rposix.statvfs)(space, w_path) + st = dispatch_filename(rposix_stat.statvfs)(space, w_path) except OSError as e: raise wrap_oserror2(space, e, w_path) else: @@ -427,11 +430,11 @@ try: if space.isinstance_w(w_path, space.w_unicode): path = FileEncoder(space, w_path) - fullpath = rposix._getfullpathname(path) + fullpath = rposix.getfullpathname(path) w_fullpath = space.wrap(fullpath) else: path = space.str0_w(w_path) - fullpath = rposix._getfullpathname(path) + fullpath = rposix.getfullpathname(path) w_fullpath = space.wrap(fullpath) except OSError, e: raise wrap_oserror2(space, e, w_path) @@ -661,7 +664,7 @@ def kill(space, pid, sig): "Kill a process with a signal." try: - rposix.os_kill(pid, sig) + rposix.kill(pid, sig) except OSError, e: raise wrap_oserror(space, e) @@ -677,7 +680,7 @@ """Abort the interpreter immediately. This 'dumps core' or otherwise fails in the hardest way possible on the hosting operating system.""" import signal - rposix.os_kill(os.getpid(), signal.SIGABRT) + rposix.kill(os.getpid(), signal.SIGABRT) @unwrap_spec(src='str0', dst='str0') def link(space, src, dst): @@ -1199,7 +1202,7 @@ raise wrap_oserror(space, e) def declare_new_w_star(name): - if name in RegisterOs.w_star_returning_int: + if name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG'): @unwrap_spec(status=c_int) def WSTAR(space, status): return space.wrap(getattr(os, name)(status)) @@ -1211,7 +1214,7 @@ WSTAR.func_name = name return WSTAR -for name in RegisterOs.w_star: +for name in rposix.WAIT_MACROS: if hasattr(os, name): func = declare_new_w_star(name) globals()[name] = func diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -6,8 +6,8 @@ from rpython.tool.udir import udir from pypy.tool.pytest.objspace import gettestobjspace from pypy.conftest import pypydir -from rpython.rtyper.module.ll_os import RegisterOs from rpython.translator.c.test.test_extfunc import need_sparse_files +from rpython.rlib import rposix import os import py import sys @@ -93,6 +93,12 @@ def setup_method(self, meth): if getattr(meth, 'need_sparse_files', False): + if sys.maxsize < 2**32 and not self.runappdirect: + # this fails because it uses ll2ctypes to call the posix + # functions like 'open' and 'lseek', whereas a real compiled + # C program would macro-define them to their longlong versions + py.test.skip("emulation of files can't use " + "larger-than-long offsets") need_sparse_files() def test_posix_is_pypy_s(self): @@ -576,7 +582,7 @@ raises(TypeError, "os.utime('xxx', 3)") raises(OSError, "os.utime('somefilewhichihopewouldneverappearhere', None)") - for name in RegisterOs.w_star: + for name in rposix.WAIT_MACROS: if hasattr(os, name): values = [0, 1, 127, 128, 255] code = py.code.Source(""" diff --git a/pypy/module/signal/__init__.py b/pypy/module/signal/__init__.py --- a/pypy/module/signal/__init__.py +++ b/pypy/module/signal/__init__.py @@ -48,3 +48,6 @@ use_bytecode_counter=False) space.actionflag.__class__ = interp_signal.SignalActionFlag # xxx yes I know the 
previous line is a hack + + def startup(self, space): + space.check_signal_action.startup(space) diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py --- a/pypy/module/signal/interp_signal.py +++ b/pypy/module/signal/interp_signal.py @@ -63,19 +63,25 @@ AsyncAction.__init__(self, space) self.pending_signal = -1 self.fire_in_another_thread = False - if self.space.config.objspace.usemodules.thread: - from pypy.module.thread import gil - gil.after_thread_switch = self._after_thread_switch + # + @rgc.no_collect + def _after_thread_switch(): + if self.fire_in_another_thread: + if self.space.threadlocals.signals_enabled(): + self.fire_in_another_thread = False + self.space.actionflag.rearm_ticker() + # this occurs when we just switched to the main thread + # and there is a signal pending: we force the ticker to + # -1, which should ensure perform() is called quickly. + self._after_thread_switch = _after_thread_switch + # ^^^ so that 'self._after_thread_switch' can be annotated as a + # constant - @rgc.no_collect - def _after_thread_switch(self): - if self.fire_in_another_thread: - if self.space.threadlocals.signals_enabled(): - self.fire_in_another_thread = False - self.space.actionflag.rearm_ticker() - # this occurs when we just switched to the main thread - # and there is a signal pending: we force the ticker to - # -1, which should ensure perform() is called quickly. + def startup(self, space): + # this is translated + if space.config.objspace.usemodules.thread: + from rpython.rlib import rgil + rgil.invoke_after_thread_switch(self._after_thread_switch) def perform(self, executioncontext, frame): self._poll_for_signals() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -1353,8 +1353,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 4): - py.test.skip("re-enable me in version 1.4") + if __version_info__ < (1, 5): + py.test.skip("re-enable me in version 1.5") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py --- a/pypy/module/thread/gil.py +++ b/pypy/module/thread/gil.py @@ -11,7 +11,6 @@ from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import PeriodicAsyncAction from pypy.module.thread.threadlocals import OSThreadLocals -from rpython.rlib.objectmodel import invoke_around_extcall class GILThreadLocals(OSThreadLocals): """A version of OSThreadLocals that enforces a GIL.""" @@ -23,34 +22,21 @@ space.actionflag.register_periodic_action(GILReleaseAction(space), use_bytecode_counter=True) - def _initialize_gil(self, space): - rgil.gil_allocate() - def setup_threads(self, space): """Enable threads in the object space, if they haven't already been.""" if not self.gil_ready: - self._initialize_gil(space) + # Note: this is a quasi-immutable read by module/pypyjit/interp_jit + # It must be changed (to True) only if it was really False before + rgil.allocate() self.gil_ready = True result = True else: result = False # already set up - - # add the GIL-releasing callback around external function calls. 
- # - # XXX we assume a single space, but this is not quite true during - # testing; for example, if you run the whole of test_lock you get - # a deadlock caused by the first test's space being reused by - # test_lock_again after the global state was cleared by - # test_compile_lock. As a workaround, we repatch these global - # fields systematically. - invoke_around_extcall(before_external_call, after_external_call) return result - def reinit_threads(self, space): - "Called in the child process after a fork()" - OSThreadLocals.reinit_threads(self, space) - if self.gil_ready: # re-initialize the gil if needed - self._initialize_gil(space) + ## def reinit_threads(self, space): + ## "Called in the child process after a fork()" + ## OSThreadLocals.reinit_threads(self, space) class GILReleaseAction(PeriodicAsyncAction): @@ -59,43 +45,4 @@ """ def perform(self, executioncontext, frame): - do_yield_thread() - - -after_thread_switch = lambda: None # hook for signal.py - -def before_external_call(): - # this function must not raise, in such a way that the exception - # transformer knows that it cannot raise! - rgil.gil_release() -before_external_call._gctransformer_hint_cannot_collect_ = True -before_external_call._dont_reach_me_in_del_ = True - -def after_external_call(): - rgil.gil_acquire() - rthread.gc_thread_run() - after_thread_switch() -after_external_call._gctransformer_hint_cannot_collect_ = True -after_external_call._dont_reach_me_in_del_ = True - -# The _gctransformer_hint_cannot_collect_ hack is needed for -# translations in which the *_external_call() functions are not inlined. -# They tell the gctransformer not to save and restore the local GC -# pointers in the shadow stack. This is necessary because the GIL is -# not held after the call to before_external_call() or before the call -# to after_external_call(). - -def do_yield_thread(): - # explicitly release the gil, in a way that tries to give more - # priority to other threads (as opposed to continuing to run in - # the same thread). - if rgil.gil_yield_thread(): - rthread.gc_thread_run() - after_thread_switch() -do_yield_thread._gctransformer_hint_close_stack_ = True -do_yield_thread._dont_reach_me_in_del_ = True -do_yield_thread._dont_inline_ = True - -# do_yield_thread() needs a different hint: _gctransformer_hint_close_stack_. -# The *_external_call() functions are themselves called only from the rffi -# module from a helper function that also has this hint. 
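The gil.py cleanup in this changeset drops the before_external_call/after_external_call pair in favour of direct rgil.release()/rgil.acquire() calls, which is also how the updated test support code below waits without holding the GIL. The surrounding pattern, as a standalone sketch (it assumes an RPython/PyPy source checkout so that rpython.rlib.rgil is importable; do_blocking_io is a placeholder for whatever blocks outside the interpreter):

from rpython.rlib import rgil

def call_blocking(do_blocking_io):
    # drop the GIL so other interpreter threads can run while we block ...
    rgil.release()
    result = do_blocking_io()
    # ... and take it back before touching interpreter state again
    rgil.acquire()
    return result
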
+ rgil.yield_thread() diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -5,7 +5,7 @@ import errno from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.module.thread import gil +from rpython.rlib import rgil NORMAL_TIMEOUT = 300.0 # 5 minutes @@ -15,9 +15,9 @@ adaptivedelay = 0.04 limit = time.time() + delay * NORMAL_TIMEOUT while time.time() <= limit: - gil.before_external_call() + rgil.release() time.sleep(adaptivedelay) - gil.after_external_call() + rgil.acquire() gc.collect() if space.is_true(space.call_function(w_condition)): return diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -1,5 +1,6 @@ import time from pypy.module.thread import gil +from rpython.rlib import rgil from rpython.rlib.test import test_rthread from rpython.rlib import rthread as thread from rpython.rlib.objectmodel import we_are_translated @@ -55,7 +56,7 @@ assert state.datalen3 == len(state.data) assert state.datalen4 == len(state.data) debug_print(main, i, state.datalen4) - gil.do_yield_thread() + rgil.yield_thread() assert i == j j += 1 def bootstrap(): @@ -82,9 +83,9 @@ if not still_waiting: raise ValueError("time out") still_waiting -= 1 - if not we_are_translated(): gil.before_external_call() + if not we_are_translated(): rgil.release() time.sleep(0.01) - if not we_are_translated(): gil.after_external_call() + if not we_are_translated(): rgil.acquire() debug_print("leaving!") i1 = i2 = 0 for tid, i in state.data: diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py --- a/pypy/module/time/interp_time.py +++ b/pypy/module/time/interp_time.py @@ -482,13 +482,6 @@ secs = pytime.time() return space.wrap(secs) -if _WIN: - class PCCache: - pass - pccache = PCCache() - pccache.divisor = 0.0 - pccache.ctrStart = 0 - def clock(space): """clock() -> floating point number diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -521,7 +521,6 @@ def descr_getitem(self, space, w_index): if isinstance(w_index, W_SliceObject): - # XXX consider to extend rlist's functionality? 
length = self.length() start, stop, step, slicelength = w_index.indices4(space, length) assert slicelength >= 0 diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3516,6 +3516,32 @@ s = a.build_types(f, [unicode]) assert isinstance(s, annmodel.SomeUnicodeString) + def test_extended_slice(self): + a = self.RPythonAnnotator() + def f(start, end, step): + return [1, 2, 3][start:end:step] + with py.test.raises(AnnotatorError): + a.build_types(f, [int, int, int]) + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True), + annmodel.SomeInteger(nonneg=True)]) + def f(x): + return x[::-1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[::2] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) + def f(x): + return x[1:2:1] + a = self.RPythonAnnotator() + with py.test.raises(AnnotatorError): + a.build_types(f, [str]) def test_negative_slice(self): def f(s, e): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -441,7 +441,7 @@ def dict_contains(s_dct, s_element, position): s_dct.dictdef.generalize_key(s_element) if s_dct._is_empty(position): - s_bool =SomeBool() + s_bool = SomeBool() s_bool.const = False return s_bool return s_Bool @@ -686,7 +686,7 @@ enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): raise AnnotatorError("Encoding %s not supported for unicode" % (enc,)) - return SomeString() + return SomeString(no_nul=self.no_nul) method_encode.can_only_throw = [UnicodeEncodeError] @@ -719,7 +719,7 @@ enc = s_enc.const if enc not in ('ascii', 'latin-1', 'utf-8'): raise AnnotatorError("Encoding %s not supported for strings" % (enc,)) - return SomeUnicodeString() + return SomeUnicodeString(no_nul=self.no_nul) method_decode.can_only_throw = [UnicodeDecodeError] class __extend__(SomeChar, SomeUnicodeCodePoint): diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -23,7 +23,7 @@ if func.func_code.co_cellvars: raise ValueError( """RPython functions cannot create closures -Possible casues: +Possible causes: Function is inner function Function uses generator expressions Lambda expressions diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -1,5 +1,5 @@ """ -This module defines all the SpaceOeprations used in rpython.flowspace. +This module defines all the SpaceOperations used in rpython.flowspace. 
""" import __builtin__ @@ -196,21 +196,6 @@ return cls._dispatch(type(s_arg)) @classmethod - def get_specialization(cls, s_arg, *_ignored): - try: - impl = getattr(s_arg, cls.opname) - - def specialized(annotator, arg, *other_args): - return impl(*[annotator.annotation(x) for x in other_args]) - try: - specialized.can_only_throw = impl.can_only_throw - except AttributeError: - pass - return specialized - except AttributeError: - return cls._dispatch(type(s_arg)) - - @classmethod def register_transform(cls, Some_cls): def decorator(func): cls._transform[Some_cls] = func @@ -523,6 +508,14 @@ *[annotator.annotation(arg) for arg in self.args]) +class NewSlice(HLOperation): + opname = 'newslice' + canraise = [] + + def consider(self, annotator): + raise AnnotatorError("Cannot use extended slicing in rpython") + + class Pow(PureOperation): opname = 'pow' arity = 3 diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -19,7 +19,6 @@ from rpython.jit.backend.arm.locations import imm, RawSPStackLocation from rpython.jit.backend.llsupport import symbolic from rpython.jit.backend.llsupport.gcmap import allocate_gcmap -from rpython.jit.backend.llsupport.descr import InteriorFieldDescr from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.history import (AbstractFailDescr, ConstInt, @@ -655,31 +654,24 @@ pmc.B_offs(offset, c.EQ) return fcond - def emit_op_setfield_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs, size = arglocs - scale = get_scale(size.value) - self._write_to_mem(value_loc, base_loc, - ofs, imm(scale), fcond) + def emit_op_gc_store(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, ofs_loc, size_loc = arglocs + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, ofs_loc, imm(scale), fcond) return fcond - emit_op_setfield_raw = emit_op_setfield_gc - emit_op_zero_ptr_field = emit_op_setfield_gc - - def _genop_getfield(self, op, arglocs, regalloc, fcond): - base_loc, ofs, res, size = arglocs - signed = op.getdescr().is_field_signed() - scale = get_scale(size.value) - self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond) + def _emit_op_gc_load(self, op, arglocs, regalloc, fcond): + base_loc, ofs_loc, res_loc, nsize_loc = arglocs + nsize = nsize_loc.value + signed = (nsize < 0) + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, ofs_loc, imm(scale), + signed, fcond) return fcond - emit_op_getfield_gc_i = _genop_getfield - emit_op_getfield_gc_r = _genop_getfield - emit_op_getfield_gc_f = _genop_getfield - emit_op_getfield_gc_pure_i = _genop_getfield - emit_op_getfield_gc_pure_r = _genop_getfield - emit_op_getfield_gc_pure_f = _genop_getfield - emit_op_getfield_raw_i = _genop_getfield - emit_op_getfield_raw_f = _genop_getfield + emit_op_gc_load_i = _emit_op_gc_load + emit_op_gc_load_r = _emit_op_gc_load + emit_op_gc_load_f = _emit_op_gc_load def emit_op_increment_debug_counter(self, op, arglocs, regalloc, fcond): base_loc, value_loc = arglocs @@ -688,68 +680,21 @@ self.mc.STR_ri(value_loc.value, base_loc.value, 0, cond=fcond) return fcond - def _genop_getinteriorfield(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, res_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, 
ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - descr = op.getdescr() - assert isinstance(descr, InteriorFieldDescr) - signed = descr.fielddescr.is_field_signed() - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - ofs_loc = tmploc - self._load_from_mem(res_loc, base_loc, ofs_loc, - imm(scale), signed, fcond) - return fcond - - emit_op_getinteriorfield_gc_i = _genop_getinteriorfield - emit_op_getinteriorfield_gc_r = _genop_getinteriorfield - emit_op_getinteriorfield_gc_f = _genop_getinteriorfield - - def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond): - (base_loc, index_loc, value_loc, - ofs_loc, ofs, itemsize, fieldsize) = arglocs - scale = get_scale(fieldsize.value) - tmploc, save = self.get_tmp_reg([base_loc, index_loc, value_loc, ofs_loc]) - assert not save - self.mc.gen_load_int(tmploc.value, itemsize.value) - self.mc.MUL(tmploc.value, index_loc.value, tmploc.value) - if ofs.value > 0: - if ofs_loc.is_imm(): - self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value) - else: - self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value) - self._write_to_mem(value_loc, base_loc, tmploc, imm(scale), fcond) - return fcond - emit_op_setinteriorfield_raw = emit_op_setinteriorfield_gc - - def emit_op_arraylen_gc(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs = arglocs - self.load_reg(self.mc, res, base_loc, ofs.value) - return fcond - - def emit_op_setarrayitem_gc(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - + def emit_op_gc_store_indexed(self, op, arglocs, regalloc, fcond): + value_loc, base_loc, index_loc, size_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + scale = get_scale(size_loc.value) + self._write_to_mem(value_loc, base_loc, index_loc, imm(scale), fcond) return fcond def _write_to_mem(self, value_loc, base_loc, ofs_loc, scale, fcond=c.AL): + # Write a value of size '1 << scale' at the address + # 'base_ofs + ofs_loc'. Note that 'scale' is not used to scale + # the offset! 
if scale.value == 3: assert value_loc.is_vfp_reg() # vstr only supports imm offsets @@ -789,43 +734,31 @@ else: assert 0 - emit_op_setarrayitem_raw = emit_op_setarrayitem_gc - - def emit_op_raw_store(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - self._write_to_mem(value_loc, base_loc, ofs_loc, scale, fcond) + def _emit_op_gc_load_indexed(self, op, arglocs, regalloc, fcond): + res_loc, base_loc, index_loc, nsize_loc, ofs_loc = arglocs + assert index_loc.is_core_reg() + nsize = nsize_loc.value + signed = (nsize < 0) + # add the base offset + if ofs_loc.value > 0: + self.mc.ADD_ri(r.ip.value, index_loc.value, imm=ofs_loc.value) + index_loc = r.ip + # + scale = get_scale(abs(nsize)) + self._load_from_mem(res_loc, base_loc, index_loc, imm(scale), + signed, fcond) return fcond - def _genop_getarrayitem(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - signed = op.getdescr().is_item_signed() - - # scale the offset as required - # XXX we should try to encode the scale inside the "shift" part of LDR - if scale.value > 0: - self.mc.LSL_ri(r.ip.value, ofs_loc.value, scale.value) - ofs_loc = r.ip - # add the base offset - if ofs.value > 0: - self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value) - ofs_loc = r.ip - # - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_getarrayitem_gc_i = _genop_getarrayitem - emit_op_getarrayitem_gc_r = _genop_getarrayitem - emit_op_getarrayitem_gc_f = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_i = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_r = _genop_getarrayitem - emit_op_getarrayitem_gc_pure_f = _genop_getarrayitem - emit_op_getarrayitem_raw_i = _genop_getarrayitem - emit_op_getarrayitem_raw_f = _genop_getarrayitem + emit_op_gc_load_indexed_i = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_r = _emit_op_gc_load_indexed + emit_op_gc_load_indexed_f = _emit_op_gc_load_indexed def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale, signed=False, fcond=c.AL): + # Load a value of '1 << scale' bytes, from the memory location + # 'base_loc + ofs_loc'. Note that 'scale' is not used to scale + # the offset! 
+ # if scale.value == 3: assert res_loc.is_vfp_reg() # vldr only supports imm offsets @@ -881,51 +814,6 @@ else: assert 0 - def _genop_raw_load(self, op, arglocs, regalloc, fcond): - res_loc, base_loc, ofs_loc, scale, ofs = arglocs - assert ofs_loc.is_core_reg() - # no base offset - assert ofs.value == 0 - signed = op.getdescr().is_item_signed() - self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond) - return fcond - - emit_op_raw_load_i = _genop_raw_load - emit_op_raw_load_f = _genop_raw_load - - def emit_op_strlen(self, op, arglocs, regalloc, fcond): - l0, l1, res = arglocs - if l1.is_imm(): - self.mc.LDR_ri(res.value, l0.value, l1.getint(), cond=fcond) - else: - self.mc.LDR_rr(res.value, l0.value, l1.value, cond=fcond) - return fcond - - def emit_op_strgetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.LDRB_ri(res.value, r.ip.value, basesize.value, cond=fcond) - return fcond - - def emit_op_strsetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, basesize = arglocs - if ofs_loc.is_imm(): - self.mc.ADD_ri(r.ip.value, base_loc.value, ofs_loc.getint(), - cond=fcond) - else: - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, - cond=fcond) - - self.mc.STRB_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - return fcond - #from ../x86/regalloc.py:928 ff. def emit_op_copystrcontent(self, op, arglocs, regalloc, fcond): assert len(arglocs) == 0 @@ -1016,35 +904,6 @@ else: raise AssertionError("bad unicode item size") - emit_op_unicodelen = emit_op_strlen - - def emit_op_unicodegetitem(self, op, arglocs, regalloc, fcond): - res, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.LDR_ri(res.value, r.ip.value, basesize.value, cond=fcond) - elif scale.value == 1: - self.mc.LDRH_ri(res.value, r.ip.value, basesize.value, cond=fcond) - else: - assert 0, itemsize.value - return fcond - - def emit_op_unicodesetitem(self, op, arglocs, regalloc, fcond): - value_loc, base_loc, ofs_loc, scale, basesize, itemsize = arglocs - self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value, cond=fcond, - imm=scale.value, shifttype=shift.LSL) - if scale.value == 2: - self.mc.STR_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - elif scale.value == 1: - self.mc.STRH_ri(value_loc.value, r.ip.value, basesize.value, - cond=fcond) - else: - assert 0, itemsize.value - - return fcond - def store_force_descr(self, op, fail_locs, frame_depth): pos = self.mc.currpos() guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -34,9 +34,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.rlib.rarithmetic import r_uint from 
rpython.jit.backend.llsupport.descr import CallDescr @@ -802,15 +799,12 @@ src_locations2, dst_locations2, vfptmploc) return [] - def prepare_op_setfield_gc(self, op, fcond): + def prepare_op_gc_store(self, op, fcond): boxes = op.getarglist() - ofs, size, sign = unpack_fielddescr(op.getdescr()) - return self._prepare_op_setfield(boxes, ofs, size) - - def _prepare_op_setfield(self, boxes, ofs, size): - a0, a1 = boxes - base_loc = self.make_sure_var_in_reg(a0, boxes) - value_loc = self.make_sure_var_in_reg(a1, boxes) + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + ofs = boxes[1].getint() + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + size = boxes[3].getint() ofs_size = default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -819,19 +813,13 @@ self.assembler.load(ofs_loc, imm(ofs)) return [value_loc, base_loc, ofs_loc, imm(size)] - prepare_op_setfield_raw = prepare_op_setfield_gc - - def prepare_op_zero_ptr_field(self, op, fcond): + def _prepare_op_gc_load(self, op, fcond): a0 = op.getarg(0) ofs = op.getarg(1).getint() - return self._prepare_op_setfield([a0, ConstInt(0)], ofs, WORD) - - def _prepare_op_getfield(self, op, fcond): - a0 = op.getarg(0) - ofs, size, sign = unpack_fielddescr(op.getdescr()) + nsize = op.getarg(2).getint() # negative for "signed" base_loc = self.make_sure_var_in_reg(a0) immofs = imm(ofs) - ofs_size = default_imm_size if size < 8 else VMEM_imm_size + ofs_size = default_imm_size if abs(nsize) < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = immofs else: @@ -839,17 +827,12 @@ self.assembler.load(ofs_loc, immofs) self.possibly_free_vars_for_op(op) self.free_temp_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size)] + res_loc = self.force_allocate_reg(op) + return [base_loc, ofs_loc, res_loc, imm(nsize)] - prepare_op_getfield_gc_i = _prepare_op_getfield - prepare_op_getfield_gc_r = _prepare_op_getfield - prepare_op_getfield_gc_f = _prepare_op_getfield - prepare_op_getfield_raw_i = _prepare_op_getfield - prepare_op_getfield_raw_f = _prepare_op_getfield - prepare_op_getfield_gc_pure_i = _prepare_op_getfield - prepare_op_getfield_gc_pure_r = _prepare_op_getfield - prepare_op_getfield_gc_pure_f = _prepare_op_getfield + prepare_op_gc_load_i = _prepare_op_gc_load + prepare_op_gc_load_r = _prepare_op_gc_load + prepare_op_gc_load_f = _prepare_op_gc_load def prepare_op_increment_debug_counter(self, op, fcond): boxes = op.getarglist() @@ -859,188 +842,38 @@ self.free_temp_vars() return [base_loc, value_loc] - def _prepare_op_getinteriorfield(self, op, fcond): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.make_sure_var_in_reg(op.getarg(1), args) - immofs = imm(ofs) - ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size - if check_imm_arg(ofs, size=ofs_size): - ofs_loc = immofs - else: - ofs_loc = self.get_scratch_reg(INT, args) - self.assembler.load(ofs_loc, immofs) + def prepare_op_gc_store_indexed(self, op, fcond): + boxes = op.getarglist() + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + value_loc = self.make_sure_var_in_reg(boxes[2], boxes) + index_loc = self.make_sure_var_in_reg(boxes[1], boxes) + assert boxes[3].getint() == 1 # scale + ofs = boxes[4].getint() + size = boxes[5].getint() + assert check_imm_arg(ofs) + return [value_loc, base_loc, index_loc, imm(size), imm(ofs)] + + def 
_prepare_op_gc_load_indexed(self, op, fcond): + boxes = op.getarglist() + base_loc = self.make_sure_var_in_reg(boxes[0], boxes) + index_loc = self.make_sure_var_in_reg(boxes[1], boxes) + assert boxes[2].getint() == 1 # scale + ofs = boxes[3].getint() + nsize = boxes[4].getint() + assert check_imm_arg(ofs) self.possibly_free_vars_for_op(op) self.free_temp_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] + res_loc = self.force_allocate_reg(op) + return [res_loc, base_loc, index_loc, imm(nsize), imm(ofs)] - prepare_op_getinteriorfield_gc_i = _prepare_op_getinteriorfield - prepare_op_getinteriorfield_gc_r = _prepare_op_getinteriorfield - prepare_op_getinteriorfield_gc_f = _prepare_op_getinteriorfield - - def prepare_op_setinteriorfield_gc(self, op, fcond): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(op.getarg(0), args) - index_loc = self.make_sure_var_in_reg(op.getarg(1), args) - value_loc = self.make_sure_var_in_reg(op.getarg(2), args) - immofs = imm(ofs) - ofs_size = default_imm_size if fieldsize < 8 else VMEM_imm_size - if check_imm_arg(ofs, size=ofs_size): - ofs_loc = immofs - else: - ofs_loc = self.get_scratch_reg(INT, args) - self.assembler.load(ofs_loc, immofs) - return [base_loc, index_loc, value_loc, ofs_loc, imm(ofs), - imm(itemsize), imm(fieldsize)] - prepare_op_setinteriorfield_raw = prepare_op_setinteriorfield_gc - - def prepare_op_arraylen_gc(self, op, fcond): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - arg = op.getarg(0) - base_loc = self.make_sure_var_in_reg(arg) - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_op_setarrayitem_gc(self, op, fcond): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - scale = get_scale(size) - args = op.getarglist() - base_loc = self.make_sure_var_in_reg(args[0], args) - value_loc = self.make_sure_var_in_reg(args[2], args) - ofs_loc = self.make_sure_var_in_reg(args[1], args) - assert check_imm_arg(ofs) - return [value_loc, base_loc, ofs_loc, imm(scale), imm(ofs)] - prepare_op_setarrayitem_raw = prepare_op_setarrayitem_gc - prepare_op_raw_store = prepare_op_setarrayitem_gc - - def _prepare_op_getarrayitem(self, op, fcond): - boxes = op.getarglist() - size, ofs, _ = unpack_arraydescr(op.getdescr()) - scale = get_scale(size) - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - assert check_imm_arg(ofs) - return [res, base_loc, ofs_loc, imm(scale), imm(ofs)] - - prepare_op_getarrayitem_gc_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_r = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_f = _prepare_op_getarrayitem - prepare_op_getarrayitem_raw_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_raw_f = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_i = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_r = _prepare_op_getarrayitem - prepare_op_getarrayitem_gc_pure_f = _prepare_op_getarrayitem - prepare_op_raw_load_i = _prepare_op_getarrayitem - prepare_op_raw_load_f = _prepare_op_getarrayitem - - def prepare_op_strlen(self, op, fcond): - args = op.getarglist() - l0 = 
self.make_sure_var_in_reg(op.getarg(0)) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - immofs = imm(ofs_length) - if check_imm_arg(ofs_length): - l1 = immofs - else: - l1 = self.get_scratch_reg(INT, args) - self.assembler.load(l1, immofs) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - - res = self.force_allocate_reg(op) - self.possibly_free_var(op) - return [l0, l1, res] - - def prepare_op_strgetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0]) - - a1 = boxes[1] - imm_a1 = check_imm_box(a1) - if imm_a1: - ofs_loc = self.convert_to_imm(a1) - else: - ofs_loc = self.make_sure_var_in_reg(a1, boxes) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - assert itemsize == 1 - return [res, base_loc, ofs_loc, imm(basesize)] - - def prepare_op_strsetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - assert itemsize == 1 - return [value_loc, base_loc, ofs_loc, imm(basesize)] + prepare_op_gc_load_indexed_i = _prepare_op_gc_load_indexed + prepare_op_gc_load_indexed_r = _prepare_op_gc_load_indexed + prepare_op_gc_load_indexed_f = _prepare_op_gc_load_indexed prepare_op_copystrcontent = void prepare_op_copyunicodecontent = void prepare_op_zero_array = void - def prepare_op_unicodelen(self, op, fcond): - l0 = self.make_sure_var_in_reg(op.getarg(0)) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - immofs = imm(ofs_length) - if check_imm_arg(ofs_length): - l1 = immofs - else: - l1 = self.get_scratch_reg(INT, [op.getarg(0)]) - self.assembler.load(l1, immofs) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - return [l0, l1, res] - - def prepare_op_unicodegetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - - self.possibly_free_vars_for_op(op) - self.free_temp_vars() - res = self.force_allocate_reg(op) - - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - scale = itemsize / 2 - return [res, base_loc, ofs_loc, - imm(scale), imm(basesize), imm(itemsize)] - - def prepare_op_unicodesetitem(self, op, fcond): - boxes = op.getarglist() - base_loc = self.make_sure_var_in_reg(boxes[0], boxes) - ofs_loc = self.make_sure_var_in_reg(boxes[1], boxes) - value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - scale = itemsize / 2 - return [value_loc, base_loc, ofs_loc, - imm(scale), imm(basesize), imm(itemsize)] - def _prepare_op_same_as(self, op, fcond): arg = op.getarg(0) imm_arg = check_imm_box(arg) @@ -1142,8 +975,7 @@ def prepare_op_cond_call_gc_wb(self, op, fcond): # we force all arguments in a reg because it will be needed anyway by - # the following setfield_gc or setarrayitem_gc. It avoids loading it - # twice from the memory. + # the following gc_store. 
It avoids loading it twice from the memory. N = op.numargs() args = op.getarglist() arglocs = [self.make_sure_var_in_reg(op.getarg(i), args) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -29,6 +29,10 @@ float_regs = VFPRegisterManager.all_regs frame_reg = fp + # can an ISA instruction handle a factor to the offset? + # XXX should be: tuple(1 << i for i in range(31)) + load_supported_factors = (1,) + def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): AbstractLLCPU.__init__(self, rtyper, stats, opts, diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -380,6 +380,8 @@ # the call that it is no longer equal to css. See description # in translator/c/src/thread_pthread.c. + # XXX some duplicated logic here, but note that rgil.acquire() + # does more than just RPyGilAcquire() if old_rpy_fastgil == 0: # this case occurs if some other thread stole the GIL but # released it again. What occurred here is that we changed @@ -390,9 +392,8 @@ elif old_rpy_fastgil == 1: # 'rpy_fastgil' was (and still is) locked by someone else. # We need to wait for the regular mutex. - after = rffi.aroundstate.after - if after: - after() + from rpython.rlib import rgil + rgil.acquire() else: # stole the GIL from a different thread that is also # currently in an external call from the jit. Attach @@ -421,9 +422,8 @@ # 'rpy_fastgil' contains only zero or non-zero, and this is only # called when the old value stored in 'rpy_fastgil' was non-zero # (i.e. still locked, must wait with the regular mutex) - after = rffi.aroundstate.after - if after: - after() + from rpython.rlib import rgil + rgil.acquire() _REACQGIL0_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void)) _REACQGIL2_FUNC = lltype.Ptr(lltype.FuncType([rffi.CCHARP, lltype.Signed], diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py From pypy.commits at gmail.com Mon Dec 28 06:59:23 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 28 Dec 2015 03:59:23 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: zero array passes again, needed to pass both the start scale and the length scale to the backend, Message-ID: <5681241b.6a69c20a.d649d.4de7@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r81451:d826b4e1e7da Date: 2015-12-28 12:58 +0100 http://bitbucket.org/pypy/pypy/changeset/d826b4e1e7da/ Log: zero array passes again, needed to pass both the start scale and the length scale to the backend, let's see if there are more simplifications diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4991,16 +4991,16 @@ py.test.skip("llgraph does not do zero_array") PAIR = lltype.Struct('PAIR', ('a', lltype.Signed), ('b', lltype.Signed)) - for OF in [rffi.SHORT]: #[lltype.Signed, rffi.INT, rffi.SHORT, rffi.UCHAR, PAIR]: + for OF in [lltype.Signed, rffi.INT, rffi.SHORT, rffi.UCHAR, PAIR]: A = lltype.GcArray(OF) arraydescr = self.cpu.arraydescrof(A) a = lltype.malloc(A, 100) addr = llmemory.cast_ptr_to_adr(a) a_int = heaptracker.adr2int(addr) a_ref = lltype.cast_opaque_ptr(llmemory.GCREF, a) - for (start, length) 
in [(0,100), (49, 49)]:#, (1, 98), - #(15, 9), (10, 10), (47, 0), - #(0, 4)]: + for (start, length) in [(0,100), (49, 49), (1, 98), + (15, 9), (10, 10), (47, 0), + (0, 4)]: for cls1 in [ConstInt, InputArgInt]: for cls2 in [ConstInt, InputArgInt]: print 'a_int:', a_int @@ -5033,7 +5033,7 @@ lengthbox, scale, offset) if v_len is None: v_len = ConstInt(e_offset) - import pdb; pdb.set_trace() + #import pdb; pdb.set_trace() args = [InputArgRef(a_ref), v_start, v_len, ConstInt(scale_start), ConstInt(scale_len)] ops.append(ResOperation(rop.ZERO_ARRAY, args, diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1415,7 +1415,7 @@ null_loc = self.xrm.force_allocate_reg(null_box) self.xrm.possibly_free_var(null_box) self.perform_discard(op, [base_loc, startindex_loc, - imm(constbytes), imm(len_itemsize), + imm(constbytes), imm(start_itemsize), imm(baseofs), null_loc]) else: # base_loc and startindex_loc are in two regs here (or they are @@ -1423,6 +1423,7 @@ # address that we will pass as first argument to memset(). # It can be in the same register as either one, but not in # args[2], because we're still needing the latter. + #import pdb; pdb.set_trace() dstaddr_box = TempVar() dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, [args[2]]) itemsize_loc = imm(start_itemsize) From pypy.commits at gmail.com Mon Dec 28 07:12:50 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 28 Dec 2015 04:12:50 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: removed _get_interiorfield_addr method and moved a stripped down version to the regalloc class Message-ID: <56812742.6953c20a.10e25.6835@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r81452:eb4a9cb58507 Date: 2015-12-28 13:11 +0100 http://bitbucket.org/pypy/pypy/changeset/eb4a9cb58507/ Log: removed _get_interiorfield_addr method and moved a stripped down version to the regalloc class diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -5033,7 +5033,6 @@ lengthbox, scale, offset) if v_len is None: v_len = ConstInt(e_offset) - #import pdb; pdb.set_trace() args = [InputArgRef(a_ref), v_start, v_len, ConstInt(scale_start), ConstInt(scale_len)] ops.append(ResOperation(rop.ZERO_ARRAY, args, diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1528,20 +1528,6 @@ # return shift - def _get_interiorfield_addr(self, temp_loc, index_loc, itemsize_loc, - base_loc, ofs_loc): - assert isinstance(itemsize_loc, ImmedLoc) - itemsize = itemsize_loc.value - if isinstance(index_loc, ImmedLoc): - temp_loc = imm(index_loc.value * itemsize) - shift = 0 - else: - assert valid_addressing_size(itemsize), "rewrite did not correctly handle shift/mul!" - temp_loc = index_loc - shift = get_scale(itemsize) - assert isinstance(ofs_loc, ImmedLoc) - return AddressLoc(base_loc, temp_loc, shift, ofs_loc.value) - def genop_discard_increment_debug_counter(self, op, arglocs): # The argument should be an immediate address. 
This should # generate code equivalent to a GETFIELD_RAW, an ADD(1), and a @@ -2368,12 +2354,13 @@ jmp_adr0 = self.mc.get_relative_pos() self.mc.MOV(eax, heap(nursery_free_adr)) - if valid_addressing_size(itemsize): - shift = get_scale(itemsize) - else: - shift = self._imul_const_scaled(self.mc, edi.value, - varsizeloc.value, itemsize) - varsizeloc = edi + assert valid_addressing_size(itemsize) + shift = get_scale(itemsize) + #else: + # shift = self._imul_const_scaled(self.mc, edi.value, + # varsizeloc.value, itemsize) + # varsizeloc = edi + # now varsizeloc is a register != eax. The size of # the variable part of the array is (varsizeloc << shift) assert arraydescr.basesize >= self.gc_minimal_size_in_nursery diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -9,7 +9,7 @@ from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op, - valid_addressing_size) + valid_addressing_size, get_scale) from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.arch import (WORD, JITFRAME_FIXED_SIZE, IS_X86_32, IS_X86_64, DEFAULT_FRAME_BYTES) @@ -32,6 +32,7 @@ from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rtyper.lltypesystem import lltype, rffi, rstr from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.jit.backend.x86.regloc import AddressLoc class X86RegisterManager(RegisterManager): @@ -1389,6 +1390,20 @@ def consider_keepalive(self, op): pass + def _scaled_addr(self, index_loc, itemsize_loc, + base_loc, ofs_loc): + assert isinstance(itemsize_loc, ImmedLoc) + itemsize = itemsize_loc.value + if isinstance(index_loc, ImmedLoc): + temp_loc = imm(index_loc.value * itemsize) + shift = 0 + else: + assert valid_addressing_size(itemsize), "rewrite did not correctly handle shift/mul!" + temp_loc = index_loc + shift = get_scale(itemsize) + assert isinstance(ofs_loc, ImmedLoc) + return AddressLoc(base_loc, temp_loc, shift, ofs_loc.value) + def consider_zero_array(self, op): _, baseofs, _ = unpack_arraydescr(op.getdescr()) length_box = op.getarg(2) @@ -1423,13 +1438,11 @@ # address that we will pass as first argument to memset(). # It can be in the same register as either one, but not in # args[2], because we're still needing the latter. 
- #import pdb; pdb.set_trace() dstaddr_box = TempVar() dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, [args[2]]) itemsize_loc = imm(start_itemsize) - dst_addr = self.assembler._get_interiorfield_addr( - dstaddr_loc, startindex_loc, itemsize_loc, - base_loc, imm(baseofs)) + dst_addr = self._scaled_addr(startindex_loc, itemsize_loc, + base_loc, imm(baseofs)) self.assembler.mc.LEA(dstaddr_loc, dst_addr) # if constbytes >= 0: @@ -1446,8 +1459,7 @@ bytes_loc = self.rm.force_allocate_reg(bytes_box, [dstaddr_box]) len_itemsize_loc = imm(len_itemsize) - b_adr = self.assembler._get_interiorfield_addr( - bytes_loc, length_loc, len_itemsize_loc, imm0, imm0) + b_adr = self._scaled_addr(length_loc, len_itemsize_loc, imm0, imm0) self.assembler.mc.LEA(bytes_loc, b_adr) length_box = bytes_box length_loc = bytes_loc diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py --- a/rpython/jit/backend/x86/vector_ext.py +++ b/rpython/jit/backend/x86/vector_ext.py @@ -9,7 +9,7 @@ ebp, r8, r9, r10, r11, r12, r13, r14, r15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, AddressLoc) -from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) +from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.resoperation import (rop, ResOperation, VectorOp, VectorGuardOp) from rpython.rlib.objectmodel import we_are_translated From pypy.commits at gmail.com Mon Dec 28 06:26:37 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 28 Dec 2015 03:26:37 -0800 (PST) Subject: [pypy-commit] pypy default: Non-translated-only test fix (test_macro_var_callback): we must now Message-ID: <56811c6d.cdb81c0a.9debb.4137@mx.google.com> Author: Armin Rigo Branch: Changeset: r81449:92260a86b33b Date: 2015-12-28 12:25 +0100 http://bitbucket.org/pypy/pypy/changeset/92260a86b33b/ Log: Non-translated-only test fix (test_macro_var_callback): we must now release/reacquire the gil from thread_gil.c in this mode too diff --git a/pypy/module/_cffi_backend/cglob.py b/pypy/module/_cffi_backend/cglob.py --- a/pypy/module/_cffi_backend/cglob.py +++ b/pypy/module/_cffi_backend/cglob.py @@ -3,6 +3,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend import newtype +from rpython.rlib import rgil from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -26,7 +27,9 @@ if not we_are_translated(): FNPTR = rffi.CCallback([], rffi.VOIDP) fetch_addr = rffi.cast(FNPTR, self.fetch_addr) + rgil.release() result = fetch_addr() + rgil.acquire() else: # careful in translated versions: we need to call fetch_addr, # but in a GIL-releasing way. 
The easiest is to invoke a From pypy.commits at gmail.com Mon Dec 28 07:45:20 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 28 Dec 2015 04:45:20 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: updated test_rewrite and added parameters through a helper function Message-ID: <56812ee0.95151c0a.96895.5589@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r81453:710abc88181b Date: 2015-12-28 13:44 +0100 http://bitbucket.org/pypy/pypy/changeset/710abc88181b/ Log: updated test_rewrite and added parameters through a helper function diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -535,7 +535,7 @@ v_scale = ConstInt(scale) # there is probably no point in doing _emit_mul_if.. for # c_zero! - args = [v_arr, self.c_zero, v_length_scaled, v_scale, v_scale] + args = [v_arr, self.c_zero, v_length_scaled, ConstInt(scale), v_scale] o = ResOperation(rop.ZERO_ARRAY, args, descr=arraydescr) self.emit_op(o) if isinstance(v_length, ConstInt): @@ -660,11 +660,17 @@ try: intset = self.setarrayitems_occurred(box) except KeyError: + start_box = op.getarg(1) + length_box = op.getarg(2) + if isinstance(start_box, ConstInt): + start = start_box.getint() + op.setarg(1, ConstInt(start * scale)) + op.setarg(3, ConstInt(1)) if isinstance(length_box, ConstInt): stop = length_box.getint() scaled_len = stop * scale op.setarg(2, ConstInt(scaled_len)) - op.setarg(3, ConstInt(1)) + op.setarg(4, ConstInt(1)) continue assert op.getarg(1).getint() == 0 # always 'start=0' initially start = 0 @@ -678,6 +684,7 @@ op.setarg(2, ConstInt((stop - start) * scale)) # ^^ may be ConstInt(0); then the operation becomes a no-op op.setarg(3, ConstInt(1)) # set scale to 1 + op.setarg(4, ConstInt(1)) # set scale to 1 del self.last_zero_arrays[:] self._setarrayitems_occurred.clear() # diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -36,6 +36,21 @@ assert not isinstance(descr, (str, int)) return 'gc_store(%s, %d, %s, %d)' % (baseptr, descr.offset, newvalue, descr.field_size) + def zero_array(baseptr, start, length, descr_name, descr): + assert isinstance(baseptr, str) + assert isinstance(start, (str, int)) + assert isinstance(length, (str, int)) + assert isinstance(descr_name, str) + assert not isinstance(descr, (str,int)) + itemsize = descr.itemsize + start = start * itemsize + length_scale = 1 + if isinstance(length, str): + length_scale = itemsize + else: + length = length * itemsize + return 'zero_array(%s, %s, %s, 1, %d, descr=%s)' % \ + (baseptr, start, length, length_scale, descr_name) def setarrayitem(baseptr, index, newvalue, descr): assert isinstance(baseptr, str) assert isinstance(index, (str, int)) @@ -681,7 +696,7 @@ %(cdescr.basesize + 129 * cdescr.itemsize)d) gc_store(p1, 0, 8111, %(tiddescr.field_size)s) gc_store(p1, 0, 129, %(clendescr.field_size)s) - zero_array(p1, 0, 129, %(cdescr.itemsize)d, descr=cdescr) + %(zero_array('p1', 0, 129, 'cdescr', cdescr))s call_n(123456) cond_call_gc_wb(p1, descr=wbdescr) %(setarrayitem('p1', 'i2', 'p3', cdescr))s @@ -703,7 +718,7 @@ %(cdescr.basesize + 130 * cdescr.itemsize)d) gc_store(p1, 0, 8111, %(tiddescr.field_size)s) gc_store(p1, 0, 130, %(clendescr.field_size)s) - zero_array(p1, 0, 130, %(cdescr.itemsize)d, 
descr=cdescr) + %(zero_array('p1', 0, 130, 'cdescr', cdescr))s call_n(123456) cond_call_gc_wb_array(p1, i2, descr=wbdescr) %(setarrayitem('p1', 'i2', 'p3', cdescr))s @@ -735,7 +750,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p1, 0, 8111, %(tiddescr.field_size)s) gc_store(p1, 0, 5, %(clendescr.field_size)s) - zero_array(p1, 0, 5, descr=cdescr) + %(zero_array('p1', 0, 5, 'cdescr', cdescr))s label(p1, i2, p3) cond_call_gc_wb_array(p1, i2, descr=wbdescr) %(setarrayitem('p1', 'i2', 'p3', cdescr))s @@ -810,7 +825,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 0, 5, descr=cdescr) + %(zero_array('p0', 0, 5, 'cdescr', cdescr))s %(setarrayitem('p0', 'i2', 'p1', cdescr))s jump() """) @@ -828,7 +843,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, %(2*WORD)d, %(3*WORD)d, 1, descr=cdescr) + %(zero_array('p0', 2, 3, 'cdescr', cdescr))s %(setarrayitem('p0', 1, 'p1', cdescr))s %(setarrayitem('p0', 0, 'p2', cdescr))s jump() @@ -847,7 +862,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 0, %(3*WORD)d, 1, descr=cdescr) + %(zero_array('p0', 0, 3, 'cdescr', cdescr))s %(setarrayitem('p0', 3, 'p1', cdescr))s %(setarrayitem('p0', 4, 'p2', cdescr))s jump() @@ -867,7 +882,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 0, 5, descr=cdescr) + %(zero_array('p0', 0, 5, 'cdescr', cdescr))s %(setarrayitem('p0', 3, 'p1', cdescr))s %(setarrayitem('p0', 2, 'p2', cdescr))s %(setarrayitem('p0', 1, 'p2', cdescr))s @@ -890,7 +905,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, %(5*WORD)d, 0, 1, descr=cdescr) + %(zero_array('p0', 5, 0, 'cdescr', cdescr))s %(setarrayitem('p0', 3, 'p1', cdescr))s %(setarrayitem('p0', 4, 'p2', cdescr))s %(setarrayitem('p0', 0, 'p1', cdescr))s @@ -913,7 +928,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 1, 4, descr=cdescr) + %(zero_array('p0', 1, 4, 'cdescr', cdescr))s %(setarrayitem('p0', 0, 'p1', cdescr))s call_n(321321) cond_call_gc_wb(p0, descr=wbdescr) @@ -935,7 +950,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 1, 4, descr=cdescr) + %(zero_array('p0', 1, 4, 'cdescr', cdescr))s %(setarrayitem('p0', 0, 'p1', cdescr))s label(p0, p2) cond_call_gc_wb_array(p0, 1, descr=wbdescr) @@ -952,7 +967,7 @@ [p1, p2, i3] p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr) gc_store(p0, 0, i3, %(blendescr.field_size)s) - zero_array(p0, 0, i3, descr=bdescr) + %(zero_array('p0', 0, 'i3', 'bdescr', bdescr))s jump() """) @@ -966,7 +981,7 @@ [p1, p2, i3] p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr) gc_store(p0, 0, i3, %(blendescr.field_size)s) - zero_array(p0, 0, i3, descr=bdescr) + %(zero_array('p0', 0, 'i3', 'bdescr', bdescr))s cond_call_gc_wb_array(p0, 0, descr=wbdescr) %(setarrayitem('p0', 0, 'p1', bdescr))s jump() From pypy.commits at gmail.com Mon Dec 28 08:04:05 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 28 Dec 
2015 05:04:05 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: reverted changes to malloc_nuresry_varsize, not sure how this solve this now, but I'll first implement zero_array in s390x Message-ID: <56813345.0357c20a.f7afb.6e32@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r81454:52e124acc665 Date: 2015-12-28 14:03 +0100 http://bitbucket.org/pypy/pypy/changeset/52e124acc665/ Log: reverted changes to malloc_nuresry_varsize, not sure how this solve this now, but I'll first implement zero_array in s390x diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -488,8 +488,8 @@ elif arraydescr.itemsize == 0: total_size = arraydescr.basesize elif (self.gc_ll_descr.can_use_nursery_malloc(1) and - self.gen_malloc_nursery_varsize(arraydescr.itemsize, - v_length, op, arraydescr, kind=kind)): + self.gen_malloc_nursery_varsize(arraydescr.itemsize, v_length, + op, arraydescr, kind=kind)): # note that we cannot initialize tid here, because the array # might end up being allocated by malloc_external or some # stuff that initializes GC header fields differently diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2354,12 +2354,12 @@ jmp_adr0 = self.mc.get_relative_pos() self.mc.MOV(eax, heap(nursery_free_adr)) - assert valid_addressing_size(itemsize) - shift = get_scale(itemsize) - #else: - # shift = self._imul_const_scaled(self.mc, edi.value, - # varsizeloc.value, itemsize) - # varsizeloc = edi + if valid_addressing_size(itemsize): + shift = get_scale(itemsize) + else: + shift = self._imul_const_scaled(self.mc, edi.value, + varsizeloc.value, itemsize) + varsizeloc = edi # now varsizeloc is a register != eax. The size of # the variable part of the array is (varsizeloc << shift) From pypy.commits at gmail.com Mon Dec 28 08:05:45 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 28 Dec 2015 05:05:45 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged default Message-ID: <568133a9.247bc20a.5fe7e.6129@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81456:18c213bb83f6 Date: 2015-12-28 14:04 +0100 http://bitbucket.org/pypy/pypy/changeset/18c213bb83f6/ Log: merged default diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -29,4 +29,4 @@ release/ !pypy/tool/release/ rpython/_cache/ -__pycache__/ +.cache/ diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.1 +Version: 1.4.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.1" -__version_info__ = (1, 4, 1) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,6 +44,9 @@ .. branch: fix-setslice-can-resize +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + .. branch: anntype2 A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: @@ -83,10 +86,18 @@ Trivial cleanups in flowspace.operation : fix comment & duplicated method .. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. + .. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + .. branch: cpyext-slotdefs .. branch: fix-missing-canraise +.. branch: whatsnew .. branch: fix-2211 -Fix the cryptic exception message when attempting to use extended slicing in rpython +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,7 +4,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, + CANNOT_FAIL) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, @@ -380,6 +381,17 @@ space.call_function(delattr_fn, w_self, w_name) return 0 api_func = slot_tp_setattro.api_func + elif name == 'tp_getattro': + getattr_fn = w_type.getdictvalue(space, '__getattribute__') + if getattr_fn is None: + return + + @cpython_api([PyObject, PyObject], PyObject, + error=CANNOT_FAIL, external=True) + @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) + def slot_tp_getattro(space, w_self, w_name): + return space.call_function(getattr_fn, w_self, w_name) + api_func = slot_tp_getattro.api_func else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -385,12 +385,53 @@ PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); return NULL; } + if (!args->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + if (args->ob_type->tp_getattro == + args->ob_type->tp_base->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "recursive tp_getattro"); + return NULL; + } Py_RETURN_TRUE; ''' ) ]) assert module.test_type(type(None)) + def test_tp_getattro(self): + module = self.import_extension('foo', [ + ("test_tp_getattro", "METH_VARARGS", + ''' + PyObject *obj = PyTuple_GET_ITEM(args, 0); + PyIntObject *value = PyTuple_GET_ITEM(args, 1); + if (!obj->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + PyObject *name = PyString_FromString("attr1"); + PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); + if (attr1->ob_ival != value->ob_ival) + { + PyErr_SetString(PyExc_ValueError, + "tp_getattro returned wrong value"); + return NULL; + } + Py_DECREF(name); + Py_DECREF(attr1); + Py_RETURN_TRUE; + ''' + ) + ]) + class C: + def __init__(self): + self.attr1 = 123 + assert 
module.test_tp_getattro(C(), 123) + def test_nb_int(self): module = self.import_extension('foo', [ ("nb_int", "METH_O", diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -582,6 +582,8 @@ pto.c_tp_free = base.c_tp_free if not pto.c_tp_setattro: pto.c_tp_setattro = base.c_tp_setattro + if not pto.c_tp_getattro: + pto.c_tp_getattro = base.c_tp_getattro finally: Py_DecRef(space, base_pyo) @@ -651,6 +653,12 @@ PyObject_GenericSetAttr.api_func.functype, PyObject_GenericSetAttr.api_func.get_wrapper(space)) + if not pto.c_tp_getattro: + from pypy.module.cpyext.object import PyObject_GenericGetAttr + pto.c_tp_getattro = llhelper( + PyObject_GenericGetAttr.api_func.functype, + PyObject_GenericGetAttr.api_func.get_wrapper(space)) + if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) w_dict = w_obj.getdict(space) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -804,7 +804,7 @@ base_loc = self.make_sure_var_in_reg(boxes[0], boxes) ofs = boxes[1].getint() value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - size = abs(boxes[3].getint()) + size = boxes[3].getint() ofs_size = default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -849,7 +849,7 @@ index_loc = self.make_sure_var_in_reg(boxes[1], boxes) assert boxes[3].getint() == 1 # scale ofs = boxes[4].getint() - size = abs(boxes[5].getint()) + size = boxes[5].getint() assert check_imm_arg(ofs) return [value_loc, base_loc, index_loc, imm(size), imm(ofs)] diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -126,11 +126,11 @@ def emit_gc_store_or_indexed(self, op, ptr_box, index_box, value_box, itemsize, factor, offset): factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(index_box, + self._emit_mul_if_factor_offset_not_supported(index_box, factor, offset) # - if factor == 1 and offset == 0: - args = [ptr_box, index_box, value_box, ConstInt(itemsize)] + if index_box is None: + args = [ptr_box, ConstInt(offset), value_box, ConstInt(itemsize)] newload = ResOperation(rop.GC_STORE, args) else: args = [ptr_box, index_box, value_box, ConstInt(factor), @@ -153,18 +153,15 @@ index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) - def _emit_mul_add_if_factor_offset_not_supported(self, index_box, factor, offset): - orig_factor = factor - # factor - must_manually_load_const = False # offset != 0 and not self.cpu.load_constant_offset - if factor != 1 and (factor not in self.cpu.load_supported_factors or \ - (not index_box.is_constant() and must_manually_load_const)): - # enter here if the factor is supported by the cpu - # OR the index is not constant and a new resop must be emitted - # to add the offset - if isinstance(index_box, ConstInt): - index_box = ConstInt(index_box.value * factor) - else: + def _emit_mul_if_factor_offset_not_supported(self, index_box, + factor, offset): + # Returns (factor, offset, index_box) where index_box is either + # a non-constant BoxInt or None. 
+ if isinstance(index_box, ConstInt): + return 1, index_box.value * factor + offset, None + else: + if factor != 1 and factor not in self.cpu.load_supported_factors: + # the factor is supported by the cpu # x & (x - 1) == 0 is a quick test for power of 2 assert factor > 0 if (factor & (factor - 1)) == 0: @@ -174,20 +171,13 @@ index_box = ResOperation(rop.INT_MUL, [index_box, ConstInt(factor)]) self.emit_op(index_box) - factor = 1 - # adjust the constant offset - #if must_manually_load_const: - # if isinstance(index_box, ConstInt): - # index_box = ConstInt(index_box.value + offset) - # else: - # index_box = ResOperation(rop.INT_ADD, [index_box, ConstInt(offset)]) - # self.emit_op(index_box) - # offset = 0 - return factor, offset, index_box + factor = 1 + return factor, offset, index_box - def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, factor, offset, sign, type='i'): + def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, + factor, offset, sign, type='i'): factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(index_box, + self._emit_mul_if_factor_offset_not_supported(index_box, factor, offset) # if sign: @@ -197,8 +187,8 @@ optype = type if op is not None: optype = op.type - if factor == 1 and offset == 0: - args = [ptr_box, index_box, ConstInt(itemsize)] + if index_box is None: + args = [ptr_box, ConstInt(offset), ConstInt(itemsize)] newload = ResOperation(OpHelpers.get_gc_load(optype), args) else: args = [ptr_box, index_box, ConstInt(factor), @@ -547,9 +537,8 @@ ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_depth) if sign: size = -size - args = [ConstInt(frame_info), ConstInt(0), ConstInt(1), - ConstInt(ofs), ConstInt(size)] - size = ResOperation(rop.GC_LOAD_INDEXED_I, args) + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = ResOperation(rop.NEW_ARRAY, [size], descr=descrs.arraydescr) @@ -560,9 +549,8 @@ ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_size) if sign: size = -size - args = [ConstInt(frame_info), ConstInt(0), ConstInt(1), - ConstInt(ofs), ConstInt(size)] - size = ResOperation(rop.GC_LOAD_INDEXED_I, args) + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = self.gen_malloc_nursery_varsize_frame(size) self.gen_initialize_tid(frame, descrs.arraydescr.tid) @@ -612,15 +600,12 @@ descr = self.cpu.getarraydescr_for_frame(arg.type) assert self.cpu.JITFRAME_FIXED_SIZE & 1 == 0 _, itemsize, _ = self.cpu.unpack_arraydescr_size(descr) - index = index_list[i] // itemsize # index is in bytes - # emit GC_LOAD_INDEXED - itemsize, basesize, _ = unpack_arraydescr(descr) - factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(ConstInt(index), - itemsize, basesize) - args = [frame, index_box, arg, ConstInt(factor), - ConstInt(offset), ConstInt(itemsize)] - self.emit_op(ResOperation(rop.GC_STORE_INDEXED, args)) + array_offset = index_list[i] # index, already measured in bytes + # emit GC_STORE + _, basesize, _ = unpack_arraydescr(descr) + offset = basesize + array_offset + args = [frame, ConstInt(offset), arg, ConstInt(itemsize)] + self.emit_op(ResOperation(rop.GC_STORE, args)) descr = op.getdescr() assert isinstance(descr, JitCellToken) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ 
b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -30,13 +30,26 @@ class RewriteTests(object): def check_rewrite(self, frm_operations, to_operations, **namespace): - def trans_getarray_to_load(descr): - size = descr.basesize - if descr.is_item_signed(): - size = -size - return ','.join([str(n) for n in [descr.itemsize, - descr.basesize, - size]]) + def setfield(baseptr, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + return 'gc_store(%s, %d, %s, %d)' % (baseptr, descr.offset, + newvalue, descr.field_size) + def setarrayitem(baseptr, index, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(index, (str, int)) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + if isinstance(index, int): + offset = descr.basesize + index * descr.itemsize + return 'gc_store(%s, %d, %s, %d)' % (baseptr, offset, + newvalue, descr.itemsize) + else: + return 'gc_store_indexed(%s, %s, %s, %d, %d, %s)' % ( + baseptr, index, newvalue, + descr.itemsize, descr.basesize, descr.itemsize) + # WORD = globals()['WORD'] S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) @@ -376,7 +389,7 @@ gc_store(p1, 0, 5678, 8) p2 = nursery_ptr_increment(p1, %(tdescr.size)d) gc_store(p2, 0, 1234, 8) - gc_store(p1, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) + %(setfield('p1', 0, tdescr.gc_fielddescrs[0]))s jump() """) @@ -485,7 +498,7 @@ """, """ [i0] p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr) - gc_store_indexed(p0, 0, i0, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) gc_store(p0, 0, 0, %(strlendescr.field_size)s) jump(i0) """) @@ -611,19 +624,19 @@ %(strdescr.basesize + 16 * strdescr.itemsize + \ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s) - gc_store_indexed(p0, 0, 14, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s) gc_store(p0, 0, 0, %(strhashdescr.field_size)s) p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s) - gc_store_indexed(p1, 0, 10, 1, %(unicodelendescr.offset)s, %(unicodelendescr.field_size)s) + gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s) gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s) p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) - gc_store_indexed(p2, 0, i2, 1, %(unicodelendescr.offset)s, %(unicodelendescr.field_size)s) + gc_store(p2, %(unicodelendescr.offset)s, i2, %(unicodelendescr.field_size)s) gc_store(p2, 0, 0, %(unicodehashdescr.field_size)s) p3 = call_malloc_nursery_varsize(1, 1, i2, \ descr=strdescr) - gc_store_indexed(p3, 0, i2, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p3, %(strlendescr.offset)s, i2, %(strlendescr.field_size)s) gc_store(p3, 0, 0, %(strhashdescr.field_size)s) jump() """) @@ -636,7 +649,7 @@ """, """ [p1, p2] cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, 0, p2, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p1, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump() """) @@ -650,7 +663,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', 
cdescr))s jump() """) @@ -671,7 +684,7 @@ zero_array(p1, 0, 129, descr=cdescr) call_n(123456) cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -693,7 +706,7 @@ zero_array(p1, 0, 130, descr=cdescr) call_n(123456) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -705,7 +718,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -725,7 +738,7 @@ zero_array(p1, 0, 5, descr=cdescr) label(p1, i2, p3) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -743,12 +756,12 @@ size = interiorzdescr.arraydescr.itemsize self.check_rewrite(""" [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + setinteriorfield_gc(p1, 7, p2, descr=interiorzdescr) jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb_array(p1, 0, descr=wbdescr) - gc_store_indexed(p1, 0, p2, %(scale)s, %(offset)s, %(size)s) + cond_call_gc_wb_array(p1, 7, descr=wbdescr) + gc_store(p1, %(offset + 7 * scale)s, p2, %(size)s) jump(p1, p2) """, interiorzdescr=interiorzdescr, scale=scale, offset=offset, size=size) @@ -763,7 +776,7 @@ [p1] p0 = call_malloc_nursery(%(tdescr.size)d) gc_store(p0, 0, 5678, %(tiddescr.field_size)s) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -781,7 +794,7 @@ p1 = nursery_ptr_increment(p0, %(tdescr.size)d) gc_store(p1, 0, 1234, %(tiddescr.field_size)s) # <<>> - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -798,7 +811,7 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - gc_store_indexed(p0, i2, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 'i2', 'p1', cdescr))s jump() """) @@ -816,8 +829,8 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 2, 3, descr=cdescr) - gc_store_indexed(p0, 1, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 0, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p1', cdescr))s + %(setarrayitem('p0', 0, 'p2', cdescr))s jump() """) @@ -835,8 +848,8 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 3, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 4, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s jump() """) @@ -855,9 +868,9 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 2, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -878,11 +891,11 @@ 
gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 5, 0, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 4, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 2, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s + %(setarrayitem('p0', 0, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -901,10 +914,10 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 0, 'p1', cdescr))s call_n(321321) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -923,10 +936,10 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 0, 'p1', cdescr))s label(p0, p2) cond_call_gc_wb_array(p0, 1, descr=wbdescr) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -955,7 +968,7 @@ gc_store(p0, 0, i3, %(blendescr.field_size)s) zero_array(p0, 0, i3, descr=bdescr) cond_call_gc_wb_array(p0, 0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(bdescr.basesize)s, 1) + %(setarrayitem('p0', 0, 'p1', bdescr))s jump() """) @@ -991,10 +1004,10 @@ gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) - gc_store_indexed(p1, 0, i0, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p1, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) gc_store(p1, 0, 0, %(strhashdescr.field_size)s) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -1012,7 +1025,7 @@ gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) label(p0, p1) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -1025,8 +1038,8 @@ """, """ [p0, p1, p2] cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) - gc_store_indexed(p0, 0, p2, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump(p1, p2, p0) """) @@ -1036,20 +1049,20 @@ i2 = call_assembler_i(i0, f0, descr=casmdescr) """, """ [i0, f0] - i1 = gc_load_indexed_i(ConstClass(frame_info), 0, 1, 1, %(jfi_frame_size.field_size)s) + i1 = gc_load_i(ConstClass(frame_info), %(jfi_frame_size.offset)s, %(jfi_frame_size.field_size)s) p1 = call_malloc_nursery_varsize_frame(i1) gc_store(p1, 0, 0, %(tiddescr.field_size)s) - i2 = gc_load_indexed_i(ConstClass(frame_info), 0, 1, 1, %(jfi_frame_depth.field_size)s) - gc_store_indexed(p1, 0, 0, 1, 1, 
%(jf_extra_stack_depth.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_savedata.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_force_descr.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_descr.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_guard_exc.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_forward.field_size)s) + i2 = gc_load_i(ConstClass(frame_info), %(jfi_frame_depth.offset)s, %(jfi_frame_depth.field_size)s) + %(setfield('p1', 0, jf_extra_stack_depth))s + %(setfield('p1', 'NULL', jf_savedata))s + %(setfield('p1', 'NULL', jf_force_descr))s + %(setfield('p1', 'NULL', jf_descr))s + %(setfield('p1', 'NULL', jf_guard_exc))s + %(setfield('p1', 'NULL', jf_forward))s gc_store(p1, 0, i2, %(framelendescr.field_size)s) - gc_store_indexed(p1, 0, ConstClass(frame_info), 1, 1, %(jf_frame_info.field_size)s) - gc_store_indexed(p1, 0, i0, 8, 3, 8) - gc_store_indexed(p1, 1, f0, 8, 5, 8) + %(setfield('p1', 'ConstClass(frame_info)', jf_frame_info))s + gc_store(p1, 3, i0, 8) + gc_store(p1, 13, f0, 8) i3 = call_assembler_i(p1, descr=casmdescr) """) @@ -1101,7 +1114,7 @@ p0 = call_malloc_nursery(%(tdescr.size)d) gc_store(p0, 0, 5678, %(tiddescr.field_size)s) gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) - p1 = gc_load_indexed_r(p0, 0, 1, %(tzdescr.field_size)s, %(tzdescr.field_size)s) + p1 = gc_load_r(p0, %(tzdescr.offset)s, %(tzdescr.field_size)s) jump(p1) """) @@ -1155,23 +1168,19 @@ # 'i5 = int_add(i1,%(raw_sfdescr.basesize)s);' # 'gc_store(p0,i5,i2,%(raw_sfdescr.itemsize)s)'], [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' - 'i3 = gc_load_indexed_f(p0,0,1,%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' - 'i3 = gc_load_indexed_f(p0,0,1,%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = setfield_raw(p0,i1,descr=ydescr)' '->' - 'i3 = gc_store_indexed(p0,0,i1,1,' - '%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = setfield_gc(p0,p0,descr=zdescr)' '->' + 'i3 = gc_load_f(p0,%(ydescr.offset)s,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_raw(p0,i1,descr=ydescr)' '->' + 'gc_store(p0,%(ydescr.offset)s,i1,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_gc(p0,p0,descr=zdescr)' '->' 'cond_call_gc_wb(p0, descr=wbdescr);' - 'i3 = gc_store_indexed(p0,0,p0,1,' - '%(zdescr.offset)s,%(zdescr.field_size)s)'], + 'gc_store(p0,%(zdescr.offset)s,p0,%(zdescr.field_size)s)'], [False, (1,), 'i3 = arraylen_gc(p0, descr=adescr)' '->' 'i3 = gc_load_i(p0,0,%(adescr.itemsize)s)'], #[False, (1,), 'i3 = strlen(p0)' '->' # 'i3 = gc_load_i(p0,' # '%(strlendescr.offset)s,%(strlendescr.field_size)s)'], [True, (1,), 'i3 = strlen(p0)' '->' - 'i3 = gc_load_indexed_i(p0,0,1,' + 'i3 = gc_load_i(p0,' '%(strlendescr.offset)s,' '%(strlendescr.field_size)s)'], #[False, (1,), 'i3 = unicodelen(p0)' '->' @@ -1179,7 +1188,7 @@ # '%(unicodelendescr.offset)s,' # '%(unicodelendescr.field_size)s)'], [True, (1,), 'i3 = unicodelen(p0)' '->' - 'i3 = gc_load_indexed_i(p0,0,1,' + 'i3 = gc_load_i(p0,' '%(unicodelendescr.offset)s,' '%(unicodelendescr.field_size)s)'], diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1039,7 +1039,8 @@ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) size_box = op.getarg(3) assert isinstance(size_box, ConstInt) - size = abs(size_box.value) + size = size_box.value + assert size 
>= 1 if size == 1: need_lower_byte = True else: @@ -1061,7 +1062,8 @@ assert isinstance(size_box, ConstInt) factor = scale_box.value offset = offset_box.value - size = abs(size_box.value) + size = size_box.value + assert size >= 1 if size == 1: need_lower_byte = True else: diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -68,8 +68,8 @@ return box.value def repr_rpython(box, typechars): - return '%s/%s%d' % (box._get_hash_(), typechars, - compute_unique_id(box)) + return '%s/%s' % (box._get_hash_(), typechars, + ) #compute_unique_id(box)) class XxxAbstractValue(object): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1204,8 +1204,12 @@ '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- # same paramters as GC_LOAD, but one additional for the value to store - # note that the itemsize is not signed! + # note that the itemsize is not signed (always > 0) # (gcptr, index, value, [scale, base_offset,] itemsize) + # invariants for GC_STORE: index is constant, but can be large + # invariants for GC_STORE_INDEXED: index is a non-constant box; + # scale is a constant; + # base_offset is a small constant 'GC_STORE/4d/n', 'GC_STORE_INDEXED/6d/n', diff --git a/rpython/rlib/rurandom.py b/rpython/rlib/rurandom.py --- a/rpython/rlib/rurandom.py +++ b/rpython/rlib/rurandom.py @@ -86,27 +86,29 @@ else: # Posix implementation def init_urandom(): """NOT_RPYTHON - Return an array of one int, initialized to 0. - It is filled automatically the first time urandom() is called. """ - return lltype.malloc(rffi.CArray(lltype.Signed), 1, - immortal=True, zero=True) + return None def urandom(context, n): "Read n bytes from /dev/urandom." result = '' if n == 0: return result - if not context[0]: - context[0] = os.open("/dev/urandom", os.O_RDONLY, 0777) - while n > 0: - try: - data = os.read(context[0], n) - except OSError, e: - if e.errno != errno.EINTR: - raise - data = '' - result += data - n -= len(data) + # XXX should somehow cache the file descriptor. It's a mess. + # CPython has a 99% solution and hopes for the remaining 1% + # not to occur. For now, we just don't cache the file + # descriptor (any more... 6810f401d08e). 
+ fd = os.open("/dev/urandom", os.O_RDONLY, 0777) + try: + while n > 0: + try: + data = os.read(fd, n) + except OSError, e: + if e.errno != errno.EINTR: + raise + data = '' + result += data + n -= len(data) + finally: + os.close(fd) return result - diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -9,6 +9,8 @@ from subprocess import PIPE, Popen def run_subprocess(executable, args, env=None, cwd=None): + if isinstance(args, list): + args = [a.encode('latin1') for a in args] return _run(executable, args, env, cwd) shell_default = False From pypy.commits at gmail.com Mon Dec 28 08:05:47 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 28 Dec 2015 05:05:47 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged zero_array changes of memop-simplify3 Message-ID: <568133ab.a6ebc20a.314e0.630a@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81457:992b689427ce Date: 2015-12-28 14:04 +0100 http://bitbucket.org/pypy/pypy/changeset/992b689427ce/ Log: merged zero_array changes of memop-simplify3 diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -488,8 +488,8 @@ elif arraydescr.itemsize == 0: total_size = arraydescr.basesize elif (self.gc_ll_descr.can_use_nursery_malloc(1) and - self.gen_malloc_nursery_varsize(arraydescr.itemsize, - v_length, op, arraydescr, kind=kind)): + self.gen_malloc_nursery_varsize(arraydescr.itemsize, v_length, + op, arraydescr, kind=kind)): # note that we cannot initialize tid here, because the array # might end up being allocated by malloc_external or some # stuff that initializes GC header fields differently @@ -525,8 +525,18 @@ # See emit_pending_zeros(). (This optimization is done by # hacking the object 'o' in-place: e.g., o.getarg(1) may be # replaced with another constant greater than 0.) - o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], - descr=arraydescr) + #o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], + # descr=arraydescr) + scale = arraydescr.itemsize + v_length_scaled = v_length + if not isinstance(v_length, ConstInt): + scale, offset, v_length_scaled = \ + self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) + v_scale = ConstInt(scale) + # there is probably no point in doing _emit_mul_if.. for + # c_zero! + args = [v_arr, self.c_zero, v_length_scaled, ConstInt(scale), v_scale] + o = ResOperation(rop.ZERO_ARRAY, args, descr=arraydescr) self.emit_op(o) if isinstance(v_length, ConstInt): self.last_zero_arrays.append(self._newops[-1]) @@ -644,22 +654,37 @@ # are also already in 'newops', which is the point. 
for op in self.last_zero_arrays: assert op.getopnum() == rop.ZERO_ARRAY + descr = op.getdescr() + scale = descr.itemsize box = op.getarg(0) try: intset = self.setarrayitems_occurred(box) except KeyError: + start_box = op.getarg(1) + length_box = op.getarg(2) + if isinstance(start_box, ConstInt): + start = start_box.getint() + op.setarg(1, ConstInt(start * scale)) + op.setarg(3, ConstInt(1)) + if isinstance(length_box, ConstInt): + stop = length_box.getint() + scaled_len = stop * scale + op.setarg(2, ConstInt(scaled_len)) + op.setarg(4, ConstInt(1)) continue assert op.getarg(1).getint() == 0 # always 'start=0' initially start = 0 while start in intset: start += 1 - op.setarg(1, ConstInt(start)) + op.setarg(1, ConstInt(start * scale)) stop = op.getarg(2).getint() assert start <= stop while stop > start and (stop - 1) in intset: stop -= 1 - op.setarg(2, ConstInt(stop - start)) + op.setarg(2, ConstInt((stop - start) * scale)) # ^^ may be ConstInt(0); then the operation becomes a no-op + op.setarg(3, ConstInt(1)) # set scale to 1 + op.setarg(4, ConstInt(1)) # set scale to 1 del self.last_zero_arrays[:] self._setarrayitems_occurred.clear() # diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -36,6 +36,21 @@ assert not isinstance(descr, (str, int)) return 'gc_store(%s, %d, %s, %d)' % (baseptr, descr.offset, newvalue, descr.field_size) + def zero_array(baseptr, start, length, descr_name, descr): + assert isinstance(baseptr, str) + assert isinstance(start, (str, int)) + assert isinstance(length, (str, int)) + assert isinstance(descr_name, str) + assert not isinstance(descr, (str,int)) + itemsize = descr.itemsize + start = start * itemsize + length_scale = 1 + if isinstance(length, str): + length_scale = itemsize + else: + length = length * itemsize + return 'zero_array(%s, %s, %s, 1, %d, descr=%s)' % \ + (baseptr, start, length, length_scale, descr_name) def setarrayitem(baseptr, index, newvalue, descr): assert isinstance(baseptr, str) assert isinstance(index, (str, int)) @@ -681,7 +696,7 @@ %(cdescr.basesize + 129 * cdescr.itemsize)d) gc_store(p1, 0, 8111, %(tiddescr.field_size)s) gc_store(p1, 0, 129, %(clendescr.field_size)s) - zero_array(p1, 0, 129, descr=cdescr) + %(zero_array('p1', 0, 129, 'cdescr', cdescr))s call_n(123456) cond_call_gc_wb(p1, descr=wbdescr) %(setarrayitem('p1', 'i2', 'p3', cdescr))s @@ -703,7 +718,7 @@ %(cdescr.basesize + 130 * cdescr.itemsize)d) gc_store(p1, 0, 8111, %(tiddescr.field_size)s) gc_store(p1, 0, 130, %(clendescr.field_size)s) - zero_array(p1, 0, 130, descr=cdescr) + %(zero_array('p1', 0, 130, 'cdescr', cdescr))s call_n(123456) cond_call_gc_wb_array(p1, i2, descr=wbdescr) %(setarrayitem('p1', 'i2', 'p3', cdescr))s @@ -735,7 +750,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p1, 0, 8111, %(tiddescr.field_size)s) gc_store(p1, 0, 5, %(clendescr.field_size)s) - zero_array(p1, 0, 5, descr=cdescr) + %(zero_array('p1', 0, 5, 'cdescr', cdescr))s label(p1, i2, p3) cond_call_gc_wb_array(p1, i2, descr=wbdescr) %(setarrayitem('p1', 'i2', 'p3', cdescr))s @@ -810,7 +825,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 0, 5, descr=cdescr) + %(zero_array('p0', 0, 5, 'cdescr', cdescr))s %(setarrayitem('p0', 'i2', 'p1', cdescr))s jump() """) @@ -828,7 +843,7 @@ %(cdescr.basesize + 5 * 
cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 2, 3, descr=cdescr) + %(zero_array('p0', 2, 3, 'cdescr', cdescr))s %(setarrayitem('p0', 1, 'p1', cdescr))s %(setarrayitem('p0', 0, 'p2', cdescr))s jump() @@ -847,7 +862,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 0, 3, descr=cdescr) + %(zero_array('p0', 0, 3, 'cdescr', cdescr))s %(setarrayitem('p0', 3, 'p1', cdescr))s %(setarrayitem('p0', 4, 'p2', cdescr))s jump() @@ -867,7 +882,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 0, 5, descr=cdescr) + %(zero_array('p0', 0, 5, 'cdescr', cdescr))s %(setarrayitem('p0', 3, 'p1', cdescr))s %(setarrayitem('p0', 2, 'p2', cdescr))s %(setarrayitem('p0', 1, 'p2', cdescr))s @@ -890,7 +905,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 5, 0, descr=cdescr) + %(zero_array('p0', 5, 0, 'cdescr', cdescr))s %(setarrayitem('p0', 3, 'p1', cdescr))s %(setarrayitem('p0', 4, 'p2', cdescr))s %(setarrayitem('p0', 0, 'p1', cdescr))s @@ -913,7 +928,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 1, 4, descr=cdescr) + %(zero_array('p0', 1, 4, 'cdescr', cdescr))s %(setarrayitem('p0', 0, 'p1', cdescr))s call_n(321321) cond_call_gc_wb(p0, descr=wbdescr) @@ -935,7 +950,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 1, 4, descr=cdescr) + %(zero_array('p0', 1, 4, 'cdescr', cdescr))s %(setarrayitem('p0', 0, 'p1', cdescr))s label(p0, p2) cond_call_gc_wb_array(p0, 1, descr=wbdescr) @@ -952,7 +967,7 @@ [p1, p2, i3] p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr) gc_store(p0, 0, i3, %(blendescr.field_size)s) - zero_array(p0, 0, i3, descr=bdescr) + %(zero_array('p0', 0, 'i3', 'bdescr', bdescr))s jump() """) @@ -966,7 +981,7 @@ [p1, p2, i3] p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr) gc_store(p0, 0, i3, %(blendescr.field_size)s) - zero_array(p0, 0, i3, descr=bdescr) + %(zero_array('p0', 0, 'i3', 'bdescr', bdescr))s cond_call_gc_wb_array(p0, 0, descr=wbdescr) %(setarrayitem('p0', 0, 'p1', bdescr))s jump() diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -22,6 +22,7 @@ from rpython.jit.backend.detect_cpu import autodetect from rpython.jit.backend.llsupport import jitframe from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU +from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler IS_32_BIT = sys.maxint < 2**32 @@ -53,11 +54,15 @@ add_loop_instructions = ['overload for a specific cpu'] bridge_loop_instructions = ['overload for a specific cpu'] + def execute_operation(self, opname, valueboxes, result_type, descr=None): inputargs, operations = self._get_single_operation_list(opname, result_type, valueboxes, descr) + return self.execute_operations(inputargs, operations, result_type) + + def execute_operations(self, inputargs, operations, result_type): looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) args = 
[] @@ -86,6 +91,23 @@ else: assert False + def _get_operation_list(self, operations, result_type): + inputargs = [] + blacklist = set() + for op in operations: + for arg in op.getarglist(): + if not isinstance(arg, Const) and arg not in inputargs and \ + arg not in blacklist: + inputargs.append(arg) + if op.type != 'v': + blacklist.add(op) + if result_type == 'void': + op1 = ResOperation(rop.FINISH, [], descr=BasicFinalDescr(0)) + else: + op1 = ResOperation(rop.FINISH, [operations[-1]], descr=BasicFinalDescr(0)) + operations.append(op1) + return inputargs, operations + def _get_single_operation_list(self, opnum, result_type, valueboxes, descr): op0 = ResOperation(opnum, valueboxes) @@ -4983,7 +5005,7 @@ addr = llmemory.cast_ptr_to_adr(a) a_int = heaptracker.adr2int(addr) a_ref = lltype.cast_opaque_ptr(llmemory.GCREF, a) - for (start, length) in [(0, 100), (49, 49), (1, 98), + for (start, length) in [(0,100), (49, 49), (1, 98), (15, 9), (10, 10), (47, 0), (0, 4)]: for cls1 in [ConstInt, InputArgInt]: @@ -5001,11 +5023,31 @@ lengthbox = cls2(length) if cls1 == cls2 and start == length: lengthbox = startbox # same box! - self.execute_operation(rop.ZERO_ARRAY, - [InputArgRef(a_ref), - startbox, - lengthbox], - 'void', descr=arraydescr) + scale = arraydescr.itemsize + ops = [] + def emit(op): + ops.append(op) + helper = GcRewriterAssembler(None, self.cpu) + helper.emit_op = emit + offset = 0 + scale_start, s_offset, v_start = \ + helper._emit_mul_if_factor_offset_not_supported( + startbox, scale, offset) + if v_start is None: + v_start = ConstInt(s_offset) + scale_len, e_offset, v_len = \ + helper._emit_mul_if_factor_offset_not_supported( + lengthbox, scale, offset) + if v_len is None: + v_len = ConstInt(e_offset) + args = [InputArgRef(a_ref), v_start, v_len, + ConstInt(scale_start), ConstInt(scale_len)] + ops.append(ResOperation(rop.ZERO_ARRAY, args, + descr=arraydescr)) + + scalebox = ConstInt(arraydescr.itemsize) + inputargs, oplist = self._get_operation_list(ops,'void') + self.execute_operations(inputargs, oplist, 'void') assert len(a) == 100 for i in range(100): val = (0 if start <= i < start + length diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1528,25 +1528,6 @@ # return shift - def _get_interiorfield_addr(self, temp_loc, index_loc, itemsize_loc, - base_loc, ofs_loc): - assert isinstance(itemsize_loc, ImmedLoc) - itemsize = itemsize_loc.value - if isinstance(index_loc, ImmedLoc): - temp_loc = imm(index_loc.value * itemsize) - shift = 0 - elif valid_addressing_size(itemsize): - temp_loc = index_loc - shift = get_scale(itemsize) - else: - assert isinstance(index_loc, RegLoc) - assert isinstance(temp_loc, RegLoc) - assert not temp_loc.is_xmm - shift = self._imul_const_scaled(self.mc, temp_loc.value, - index_loc.value, itemsize) - assert isinstance(ofs_loc, ImmedLoc) - return AddressLoc(base_loc, temp_loc, shift, ofs_loc.value) - def genop_discard_increment_debug_counter(self, op, arglocs): # The argument should be an immediate address. This should # generate code equivalent to a GETFIELD_RAW, an ADD(1), and a @@ -2379,6 +2360,7 @@ shift = self._imul_const_scaled(self.mc, edi.value, varsizeloc.value, itemsize) varsizeloc = edi + # now varsizeloc is a register != eax. 
The size of # the variable part of the array is (varsizeloc << shift) assert arraydescr.basesize >= self.gc_minimal_size_in_nursery @@ -2468,13 +2450,8 @@ assert isinstance(null_loc, RegLoc) and null_loc.is_xmm baseofs = baseofs_loc.value nbytes = bytes_loc.value - if valid_addressing_size(itemsize_loc.value): - scale = get_scale(itemsize_loc.value) - else: - assert isinstance(startindex_loc, ImmedLoc) - baseofs += startindex_loc.value * itemsize_loc.value - startindex_loc = imm0 - scale = 0 + assert valid_addressing_size(itemsize_loc.value) + scale = get_scale(itemsize_loc.value) null_reg_cleared = False i = 0 while i < nbytes: diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -9,7 +9,7 @@ from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op, - valid_addressing_size) + valid_addressing_size, get_scale) from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.arch import (WORD, JITFRAME_FIXED_SIZE, IS_X86_32, IS_X86_64, DEFAULT_FRAME_BYTES) @@ -32,6 +32,7 @@ from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rtyper.lltypesystem import lltype, rffi, rstr from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.jit.backend.x86.regloc import AddressLoc class X86RegisterManager(RegisterManager): @@ -1389,21 +1390,39 @@ def consider_keepalive(self, op): pass + def _scaled_addr(self, index_loc, itemsize_loc, + base_loc, ofs_loc): + assert isinstance(itemsize_loc, ImmedLoc) + itemsize = itemsize_loc.value + if isinstance(index_loc, ImmedLoc): + temp_loc = imm(index_loc.value * itemsize) + shift = 0 + else: + assert valid_addressing_size(itemsize), "rewrite did not correctly handle shift/mul!" + temp_loc = index_loc + shift = get_scale(itemsize) + assert isinstance(ofs_loc, ImmedLoc) + return AddressLoc(base_loc, temp_loc, shift, ofs_loc.value) + def consider_zero_array(self, op): - itemsize, baseofs, _ = unpack_arraydescr(op.getdescr()) + _, baseofs, _ = unpack_arraydescr(op.getdescr()) length_box = op.getarg(2) + + scale_box = op.getarg(3) + assert isinstance(scale_box, ConstInt) + start_itemsize = scale_box.value + + len_scale_box = op.getarg(4) + assert isinstance(len_scale_box, ConstInt) + len_itemsize = len_scale_box.value + # rewrite handles the mul of a constant length box + constbytes = -1 if isinstance(length_box, ConstInt): - constbytes = length_box.getint() * itemsize - if constbytes == 0: - return # nothing to do - else: - constbytes = -1 + constbytes = length_box.getint() args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(args[0], args) startindex_loc = self.rm.make_sure_var_in_reg(args[1], args) - if 0 <= constbytes <= 16 * 8 and ( - valid_addressing_size(itemsize) or - isinstance(startindex_loc, ImmedLoc)): + if 0 <= constbytes <= 16 * 8: if IS_X86_64: null_loc = X86_64_XMM_SCRATCH_REG else: @@ -1411,7 +1430,7 @@ null_loc = self.xrm.force_allocate_reg(null_box) self.xrm.possibly_free_var(null_box) self.perform_discard(op, [base_loc, startindex_loc, - imm(constbytes), imm(itemsize), + imm(constbytes), imm(start_itemsize), imm(baseofs), null_loc]) else: # base_loc and startindex_loc are in two regs here (or they are @@ -1421,10 +1440,9 @@ # args[2], because we're still needing the latter. 
dstaddr_box = TempVar() dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, [args[2]]) - itemsize_loc = imm(itemsize) - dst_addr = self.assembler._get_interiorfield_addr( - dstaddr_loc, startindex_loc, itemsize_loc, - base_loc, imm(baseofs)) + itemsize_loc = imm(start_itemsize) + dst_addr = self._scaled_addr(startindex_loc, itemsize_loc, + base_loc, imm(baseofs)) self.assembler.mc.LEA(dstaddr_loc, dst_addr) # if constbytes >= 0: @@ -1433,15 +1451,15 @@ # load length_loc in a register different than dstaddr_loc length_loc = self.rm.make_sure_var_in_reg(length_box, [dstaddr_box]) - if itemsize > 1: + if len_itemsize > 1: # we need a register that is different from dstaddr_loc, # but which can be identical to length_loc (as usual, # only if the length_box is not used by future operations) bytes_box = TempVar() bytes_loc = self.rm.force_allocate_reg(bytes_box, [dstaddr_box]) - b_adr = self.assembler._get_interiorfield_addr( - bytes_loc, length_loc, itemsize_loc, imm0, imm0) + len_itemsize_loc = imm(len_itemsize) + b_adr = self._scaled_addr(length_loc, len_itemsize_loc, imm0, imm0) self.assembler.mc.LEA(bytes_loc, b_adr) length_box = bytes_box length_loc = bytes_loc diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py --- a/rpython/jit/backend/x86/vector_ext.py +++ b/rpython/jit/backend/x86/vector_ext.py @@ -9,7 +9,7 @@ ebp, r8, r9, r10, r11, r12, r13, r14, r15, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, AddressLoc) -from rpython.jit.backend.llsupport.regalloc import (get_scale, valid_addressing_size) +from rpython.jit.backend.llsupport.regalloc import get_scale from rpython.jit.metainterp.resoperation import (rop, ResOperation, VectorOp, VectorGuardOp) from rpython.rlib.objectmodel import we_are_translated diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1225,7 +1225,7 @@ 'SETINTERIORFIELD_GC/3d/n', 'SETINTERIORFIELD_RAW/3d/n', # right now, only used by tests 'SETFIELD_GC/2d/n', - 'ZERO_ARRAY/3d/n', # only emitted by the rewrite, clears (part of) an array + 'ZERO_ARRAY/4d/n', # only emitted by the rewrite, clears (part of) an array # [arraygcptr, firstindex, length], descr=ArrayDescr 'SETFIELD_RAW/2d/n', 'STRSETITEM/3/n', From pypy.commits at gmail.com Mon Dec 28 08:05:43 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 28 Dec 2015 05:05:43 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: copied stub for zero_array Message-ID: <568133a7.a415c20a.4d933.5fef@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81455:5d9f0ada3241 Date: 2015-12-28 14:04 +0100 http://bitbucket.org/pypy/pypy/changeset/5d9f0ada3241/ Log: copied stub for zero_array diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -892,6 +892,81 @@ self.mc.raw_call() self.mc.restore_std_frame() + def emit_zero_array(self, op, arglocs, regalloc): + base_loc, startindex_loc, length_loc, ofs_loc, itemsize_loc = arglocs + + # assume that an array where an item size is N: + # * if N is even, then all items are aligned to a multiple of 2 + # * if N % 4 == 0, then all items are aligned to a multiple of 4 + # * if N % 8 == 0, then all items are aligned to a multiple of 8 + itemsize = 
itemsize_loc.getint() + if itemsize & 1: stepsize = 1 + elif itemsize & 2: stepsize = 2 + elif itemsize & 4: stepsize = 4 + else: stepsize = WORD + + repeat_factor = itemsize // stepsize + if repeat_factor != 1: + # This is only for itemsize not in (1, 2, 4, WORD). + # Include the repeat_factor inside length_loc if it is a constant + if length_loc.is_imm(): + length_loc = imm(length_loc.value * repeat_factor) + repeat_factor = 1 # included + + unroll = -1 + if length_loc.is_imm(): + if length_loc.value <= 8: + unroll = length_loc.value + if unroll <= 0: + return # nothing to do + + ofs_loc = self._apply_scale(ofs_loc, startindex_loc, itemsize_loc) + ofs_loc = self._copy_in_scratch2(ofs_loc) + + if unroll > 0: + assert repeat_factor == 1 + self.mc.li(r.SCRATCH.value, 0) + self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, + itemsize) + for i in range(1, unroll): + self.eza_stX(r.SCRATCH.value, ofs_loc.value, i * stepsize, + itemsize) + + else: + if length_loc.is_imm(): + self.mc.load_imm(r.SCRATCH, length_loc.value) + length_loc = r.SCRATCH + jz_location = -1 + assert repeat_factor == 1 + else: + self.mc.cmp_op(0, length_loc.value, 0, imm=True) + jz_location = self.mc.currpos() + self.mc.trap() + length_loc = self._multiply_by_constant(length_loc, + repeat_factor, + r.SCRATCH) + self.mc.mtctr(length_loc.value) + self.mc.li(r.SCRATCH.value, 0) + + self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, + itemsize) + bdz_location = self.mc.currpos() + self.mc.trap() + + loop_location = self.mc.currpos() + self.eza_stXu(r.SCRATCH.value, ofs_loc.value, stepsize, + itemsize) + self.mc.bdnz(loop_location - self.mc.currpos()) + + pmc = OverwritingBuilder(self.mc, bdz_location, 1) + pmc.bdz(self.mc.currpos() - bdz_location) + pmc.overwrite() + + if jz_location != -1: + pmc = OverwritingBuilder(self.mc, jz_location, 1) + pmc.ble(self.mc.currpos() - jz_location) # !GT + pmc.overwrite() + class ForceOpAssembler(object): _mixin_ = True diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -901,6 +901,14 @@ else: return self._prepare_call_default(op) + def prepare_zero_array(self, op): + itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) + base_loc = self.ensure_reg(op.getarg(0), force_in_reg=True) + startindex_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + length_loc = self.ensure_reg_or_16bit_imm(op.getarg(2)) + ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) + return [base_loc, startindex_loc, length_loc, ofs_loc, imm(itemsize)] + def prepare_cond_call(self, op): self.load_condition_into_cc(op.getarg(0)) locs = [] From pypy.commits at gmail.com Mon Dec 28 11:09:01 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 28 Dec 2015 08:09:01 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: first combinations of zero_array are now passing Message-ID: <56815e9d.2968c20a.6d969.0d4d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81458:763785c74cd6 Date: 2015-12-28 17:08 +0100 http://bitbucket.org/pypy/pypy/changeset/763785c74cd6/ Log: first combinations of zero_array are now passing diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -5005,11 +5005,12 @@ addr = llmemory.cast_ptr_to_adr(a) a_int = heaptracker.adr2int(addr) a_ref = lltype.cast_opaque_ptr(llmemory.GCREF, a) - for 
(start, length) in [(0,100), (49, 49), (1, 98), - (15, 9), (10, 10), (47, 0), - (0, 4)]: + for (start, length) in [(0,100)]:#3, (49, 49), (1, 98), + #(15, 9), (10, 10), (47, 0), + #(0, 4)]: for cls1 in [ConstInt, InputArgInt]: - for cls2 in [ConstInt, InputArgInt]: + for cls2 in [ConstInt]:#[ConstInt, InputArgInt]: + print 'a_ref:', a_ref print 'a_int:', a_int print 'of:', OF print 'start:', cls1.__name__, start diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -77,7 +77,7 @@ return [lr, lq, l1] return f -prepare_int_div= generate_div_mod(False) +prepare_int_div = generate_div_mod(False) prepare_int_mod = generate_div_mod(True) def prepare_int_sub(self, op): diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -129,6 +129,10 @@ 'LA': ('rx', ['\x41']), 'LAY': ('rxy', ['\xE3','\x71']), + # move + 'MVCLE': ('rs', ['\xA8']), + + # load memory 'LMD': ('sse', ['\xEF']), 'LMG': ('rsy_a', ['\xEB','\x04']), diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -893,79 +893,31 @@ self.mc.restore_std_frame() def emit_zero_array(self, op, arglocs, regalloc): - base_loc, startindex_loc, length_loc, ofs_loc, itemsize_loc = arglocs + base_loc, startindex_loc, length_loc, \ + ofs_loc, itemsize_loc, pad_byte_loc = arglocs - # assume that an array where an item size is N: - # * if N is even, then all items are aligned to a multiple of 2 - # * if N % 4 == 0, then all items are aligned to a multiple of 4 - # * if N % 8 == 0, then all items are aligned to a multiple of 8 - itemsize = itemsize_loc.getint() - if itemsize & 1: stepsize = 1 - elif itemsize & 2: stepsize = 2 - elif itemsize & 4: stepsize = 4 - else: stepsize = WORD - - repeat_factor = itemsize // stepsize - if repeat_factor != 1: - # This is only for itemsize not in (1, 2, 4, WORD). 
- # Include the repeat_factor inside length_loc if it is a constant - if length_loc.is_imm(): - length_loc = imm(length_loc.value * repeat_factor) - repeat_factor = 1 # included - - unroll = -1 - if length_loc.is_imm(): - if length_loc.value <= 8: - unroll = length_loc.value - if unroll <= 0: - return # nothing to do - - ofs_loc = self._apply_scale(ofs_loc, startindex_loc, itemsize_loc) - ofs_loc = self._copy_in_scratch2(ofs_loc) - - if unroll > 0: - assert repeat_factor == 1 - self.mc.li(r.SCRATCH.value, 0) - self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, - itemsize) - for i in range(1, unroll): - self.eza_stX(r.SCRATCH.value, ofs_loc.value, i * stepsize, - itemsize) - + if ofs_loc.is_imm(): + self.mc.AGHI(base_loc, ofs_loc) else: - if length_loc.is_imm(): - self.mc.load_imm(r.SCRATCH, length_loc.value) - length_loc = r.SCRATCH - jz_location = -1 - assert repeat_factor == 1 - else: - self.mc.cmp_op(0, length_loc.value, 0, imm=True) - jz_location = self.mc.currpos() - self.mc.trap() - length_loc = self._multiply_by_constant(length_loc, - repeat_factor, - r.SCRATCH) - self.mc.mtctr(length_loc.value) - self.mc.li(r.SCRATCH.value, 0) - - self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, - itemsize) - bdz_location = self.mc.currpos() - self.mc.trap() - - loop_location = self.mc.currpos() - self.eza_stXu(r.SCRATCH.value, ofs_loc.value, stepsize, - itemsize) - self.mc.bdnz(loop_location - self.mc.currpos()) - - pmc = OverwritingBuilder(self.mc, bdz_location, 1) - pmc.bdz(self.mc.currpos() - bdz_location) - pmc.overwrite() - - if jz_location != -1: - pmc = OverwritingBuilder(self.mc, jz_location, 1) - pmc.ble(self.mc.currpos() - jz_location) # !GT - pmc.overwrite() + self.mc.AGR(base_loc, ofs_loc) + if ofs_loc.is_imm(): + self.mc.AGHI(base_loc, startindex_loc) + else: + self.mc.AGR(base_loc, startindex_loc) + assert not length_loc.is_imm() + self.mc.SGR(pad_byte_loc, pad_byte_loc) + pad_byte_plus_one = r.odd_reg(pad_byte_loc) + self.mc.SGR(pad_byte_plus_one, pad_byte_plus_one) + self.mc.XGR(r.SCRATCH, r.SCRATCH) + # s390x has memset directly as a hardware instruction!! 
+ # it needs 5 registers allocated + # dst = rX, length = rX+1 (ensured by the regalloc) + # pad_byte is rY to rY+1 + # scratch register holds the value written to dst + assert pad_byte_loc.is_even() + assert base_loc.is_even() + assert length_loc.value == base_loc.value + 1 + self.mc.MVCLE(base_loc, pad_byte_loc, l.addr(0, r.SCRATCH)) class ForceOpAssembler(object): diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -132,21 +132,23 @@ off = self.pool.get_offset(c) return l.pool(off) - def ensure_reg(self, box, force_in_reg): + def ensure_reg(self, box, force_in_reg, selected_reg=None): if isinstance(box, Const): offset = self.assembler.pool.get_descr_offset(box) poolloc = l.pool(offset) if force_in_reg: - tmp = TempVar() - self.temp_boxes.append(tmp) - reg = self.force_allocate_reg(tmp) - self.assembler.mc.LG(reg, poolloc) - return reg + if selected_reg is None: + tmp = TempVar() + self.temp_boxes.append(tmp) + selected_reg = self.force_allocate_reg(tmp) + self.assembler.mc.LG(selected_reg, poolloc) + return selected_reg return poolloc else: assert box in self.temp_boxes loc = self.make_sure_var_in_reg(box, - forbidden_vars=self.temp_boxes) + forbidden_vars=self.temp_boxes, + selected_reg=selected_reg) return loc def get_scratch_reg(self): @@ -155,7 +157,7 @@ self.temp_boxes.append(box) return reg - def ensure_even_odd_pair(self, var, bind_first=True, must_exist=True): + def ensure_even_odd_pair(self, var, bind_first=True, must_exist=True, load_loc_odd=True): self._check_type(var) prev_loc = self.loc(var, must_exist=must_exist) var2 = TempVar() @@ -168,9 +170,10 @@ loc, loc2 = self.force_allocate_reg_pair(var2, var, self.temp_boxes) assert loc.is_even() and loc2.is_odd() if prev_loc is not loc2: - # TODO is this true for each op? 
- # works for division -> if not parametrize - self.assembler.regalloc_mov(prev_loc, loc2) + if load_loc_odd: + self.assembler.regalloc_mov(prev_loc, loc2) + else: + self.assembler.regalloc_mov(prev_loc, loc) return loc, loc2 def force_allocate_reg_pair(self, var, var2, forbidden_vars=[], selected_reg=None): @@ -903,11 +906,18 @@ def prepare_zero_array(self, op): itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0), force_in_reg=True) + base_loc, length_loc = self.rm.ensure_even_odd_pair(op.getarg(0), + bind_first=True, must_exist=False, load_loc_odd=False) + tempvar = TempInt() + self.rm.temp_boxes.append(tempvar) + pad_byte, _ = self.rm.ensure_even_odd_pair(tempvar, bind_first=True, must_exist=False) startindex_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) - length_loc = self.ensure_reg_or_16bit_imm(op.getarg(2)) + + length_box = op.getarg(2) + length_loc = self.rm.ensure_reg(length_box, force_in_reg=True, + selected_reg=length_loc) ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, startindex_loc, length_loc, ofs_loc, imm(itemsize)] + return [base_loc, startindex_loc, length_loc, ofs_loc, imm(itemsize), pad_byte] def prepare_cond_call(self, op): self.load_condition_into_cc(op.getarg(0)) diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py --- a/rpython/jit/backend/zarch/registers.py +++ b/rpython/jit/backend/zarch/registers.py @@ -37,3 +37,7 @@ ALL_REG_INDEXES[_r] = len(ALL_REG_INDEXES) JITFRAME_FIXED_SIZE = len(ALL_REG_INDEXES) assert JITFRAME_FIXED_SIZE == 32 + +def odd_reg(r): + assert r.value % 2 == 0 + return registers[r.value+1] From pypy.commits at gmail.com Mon Dec 28 11:55:11 2015 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 28 Dec 2015 08:55:11 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: zero_array nearly passing, on some runs it still fails Message-ID: <5681696f.c74fc20a.c7529.ffffc02d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81459:7ff79cde4286 Date: 2015-12-28 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/7ff79cde4286/ Log: zero_array nearly passing, on some runs it still fails diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -65,6 +65,7 @@ def execute_operations(self, inputargs, operations, result_type): looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) + #import pdb; pdb.set_trace() args = [] for box in inputargs: if box.type == 'i': @@ -5005,12 +5006,12 @@ addr = llmemory.cast_ptr_to_adr(a) a_int = heaptracker.adr2int(addr) a_ref = lltype.cast_opaque_ptr(llmemory.GCREF, a) - for (start, length) in [(0,100)]:#3, (49, 49), (1, 98), - #(15, 9), (10, 10), (47, 0), - #(0, 4)]: + for (start, length) in [(0,100), (49, 49), (1, 98), + (15, 9), (10, 10), (47, 0), + (0, 4)]: for cls1 in [ConstInt, InputArgInt]: - for cls2 in [ConstInt]:#[ConstInt, InputArgInt]: - print 'a_ref:', a_ref + for cls2 in [ConstInt, InputArgInt]: + print 'ptr:', hex(rffi.cast(lltype.Signed, a_ref)) print 'a_int:', a_int print 'of:', OF print 'start:', cls1.__name__, start @@ -5048,6 +5049,9 @@ scalebox = ConstInt(arraydescr.itemsize) inputargs, oplist = self._get_operation_list(ops,'void') + print("input:", inputargs) + for op in oplist: + print(op) self.execute_operations(inputargs, oplist, 'void') assert len(a) == 100 for i in range(100): diff --git 
a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -900,7 +900,7 @@ self.mc.AGHI(base_loc, ofs_loc) else: self.mc.AGR(base_loc, ofs_loc) - if ofs_loc.is_imm(): + if startindex_loc.is_imm(): self.mc.AGHI(base_loc, startindex_loc) else: self.mc.AGR(base_loc, startindex_loc) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -157,7 +157,9 @@ self.temp_boxes.append(box) return reg - def ensure_even_odd_pair(self, var, bind_first=True, must_exist=True, load_loc_odd=True): + def ensure_even_odd_pair(self, var, bind_first=True, + must_exist=True, load_loc_odd=True, + move_regs=True): self._check_type(var) prev_loc = self.loc(var, must_exist=must_exist) var2 = TempVar() @@ -169,7 +171,7 @@ else: loc, loc2 = self.force_allocate_reg_pair(var2, var, self.temp_boxes) assert loc.is_even() and loc2.is_odd() - if prev_loc is not loc2: + if move_regs and prev_loc is not loc2: if load_loc_odd: self.assembler.regalloc_mov(prev_loc, loc2) else: @@ -910,12 +912,14 @@ bind_first=True, must_exist=False, load_loc_odd=False) tempvar = TempInt() self.rm.temp_boxes.append(tempvar) - pad_byte, _ = self.rm.ensure_even_odd_pair(tempvar, bind_first=True, must_exist=False) + pad_byte, _ = self.rm.ensure_even_odd_pair(tempvar, + bind_first=True, must_exist=False, move_regs=False) startindex_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) length_box = op.getarg(2) - length_loc = self.rm.ensure_reg(length_box, force_in_reg=True, - selected_reg=length_loc) + ll = self.rm.loc(length_box) + if length_loc is not ll: + self.assembler.regalloc_mov(ll, length_loc) ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) return [base_loc, startindex_loc, length_loc, ofs_loc, imm(itemsize), pad_byte] From pypy.commits at gmail.com Mon Dec 28 12:37:49 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 28 Dec 2015 09:37:49 -0800 (PST) Subject: [pypy-commit] pypy default: Make and use NOT_CONSTANT in enforceargs(), not enforcing any Message-ID: <5681736d.4c301c0a.d5f8c.022c@mx.google.com> Author: Armin Rigo Branch: Changeset: r81460:308cca047a01 Date: 2015-12-28 18:37 +0100 http://bitbucket.org/pypy/pypy/changeset/308cca047a01/ Log: Make and use NOT_CONSTANT in enforceargs(), not enforcing any specific annotation except making sure it is not a constant. 
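(Illustration only, not part of this changeset: a minimal sketch of how the new NOT_CONSTANT marker is meant to be used, modelled on the rposix.open() and test changes in the diff below; the function and argument names here are invented.)

    from rpython.rlib.objectmodel import enforceargs, NOT_CONSTANT

    @enforceargs(NOT_CONSTANT, int, int, typecheck=False)
    def open_like(path, flags, mode):
        # 'path' keeps whatever annotation the caller passes in, but the
        # annotator strips its const-ness (not_const), so the graph is not
        # specialized on one particular constant string; 'flags' and 'mode'
        # are still forced to be plain integers.
        return flags
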
diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -100,6 +100,7 @@ self.argtypes = argtypes def __call__(self, funcdesc, inputcells): + from rpython.rlib.objectmodel import NOT_CONSTANT from rpython.rtyper.lltypesystem import lltype args_s = [] from rpython.annotator import model as annmodel @@ -115,6 +116,9 @@ args_s.append(s_input) elif argtype is None: args_s.append(inputcells[i]) # no change + elif argtype is NOT_CONSTANT: + from rpython.annotator.model import not_const + args_s.append(not_const(inputcells[i])) else: args_s.append(annotation(argtype, bookkeeper=funcdesc.bookkeeper)) if len(inputcells) != len(args_s): diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -114,6 +114,8 @@ specialize = _Specialize() +NOT_CONSTANT = object() # to use in enforceargs() + def enforceargs(*types_, **kwds): """ Decorate a function with forcing of RPython-level types on arguments. None means no enforcing. diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -9,7 +9,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.rarithmetic import intmask, widen from rpython.rlib.objectmodel import ( - specialize, enforceargs, register_replacement_for) + specialize, enforceargs, register_replacement_for, NOT_CONSTANT) from rpython.rlib.signature import signature from rpython.rlib import types from rpython.annotator.model import s_Str0 @@ -415,7 +415,7 @@ @replace_os_function('open') @specialize.argtype(0) - at enforceargs(None, int, int, typecheck=False) + at enforceargs(NOT_CONSTANT, int, int, typecheck=False) def open(path, flags, mode): if _prefer_unicode(path): fd = c_wopen(_as_unicode0(path), flags, mode) diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -4,7 +4,7 @@ r_dict, UnboxedValue, Symbolic, compute_hash, compute_identity_hash, compute_unique_id, current_object_addr_as_int, we_are_translated, prepare_dict_update, reversed_dict, specialize, enforceargs, newlist_hint, - resizelist_hint, is_annotation_constant, always_inline, + resizelist_hint, is_annotation_constant, always_inline, NOT_CONSTANT, iterkeys_with_hash, iteritems_with_hash, contains_with_hash, setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin) from rpython.translator.translator import TranslationContext, graphof @@ -529,6 +529,18 @@ TYPES = [v.concretetype for v in graph.getargs()] assert TYPES == [lltype.Signed, lltype.Float] +def test_enforceargs_not_constant(): + from rpython.translator.translator import TranslationContext, graphof + @enforceargs(NOT_CONSTANT) + def f(a): + return a + def f42(): + return f(42) + t = TranslationContext() + a = t.buildannotator() + s = a.build_types(f42, []) + assert not hasattr(s, 'const') + def getgraph(f, argtypes): from rpython.translator.translator import TranslationContext, graphof From pypy.commits at gmail.com Mon Dec 28 13:34:25 2015 From: pypy.commits at gmail.com (arigo) Date: Mon, 28 Dec 2015 10:34:25 -0800 (PST) Subject: [pypy-commit] pypy default: fix the ppc backend for the new gc_load/gc_store interface Message-ID: <568180b1.a89cc20a.e06c.7c99@mx.google.com> Author: Armin Rigo Branch: Changeset: r81461:e82aaf75618b Date: 2015-12-28 19:33 
+0100 http://bitbucket.org/pypy/pypy/changeset/e82aaf75618b/ Log: fix the ppc backend for the new gc_load/gc_store interface diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -20,7 +20,7 @@ PPCBuilder, PPCGuardToken) from rpython.jit.backend.ppc.regalloc import TempPtr, TempInt from rpython.jit.backend.llsupport import symbolic, jitframe -from rpython.jit.backend.llsupport.descr import InteriorFieldDescr, CallDescr +from rpython.jit.backend.llsupport.descr import CallDescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref @@ -706,8 +706,10 @@ _mixin_ = True - def _write_to_mem(self, value_loc, base_loc, ofs, size): - if size.value == 8: + def _write_to_mem(self, value_loc, base_loc, ofs, size_loc): + assert size_loc.is_imm() + size = size_loc.value + if size == 8: if value_loc.is_fp_reg(): if ofs.is_imm(): self.mc.stfd(value_loc.value, base_loc.value, ofs.value) @@ -718,17 +720,17 @@ self.mc.std(value_loc.value, base_loc.value, ofs.value) else: self.mc.stdx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if ofs.is_imm(): self.mc.stw(value_loc.value, base_loc.value, ofs.value) else: self.mc.stwx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if ofs.is_imm(): self.mc.sth(value_loc.value, base_loc.value, ofs.value) else: self.mc.sthx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.stb(value_loc.value, base_loc.value, ofs.value) else: @@ -736,18 +738,35 @@ else: assert 0, "size not supported" - def emit_setfield_gc(self, op, arglocs, regalloc): - value_loc, base_loc, ofs, size = arglocs - self._write_to_mem(value_loc, base_loc, ofs, size) + def emit_gc_store(self, op, arglocs, regalloc): + value_loc, base_loc, ofs_loc, size_loc = arglocs + self._write_to_mem(value_loc, base_loc, ofs_loc, size_loc) - emit_setfield_raw = emit_setfield_gc - emit_zero_ptr_field = emit_setfield_gc + def _apply_offset(self, index_loc, ofs_loc): + # If offset != 0 then we have to add it here. Note that + # mc.addi() would not be valid with operand r0. + assert ofs_loc.is_imm() # must be an immediate... 
+ assert _check_imm_arg(ofs_loc.getint()) # ...that fits 16 bits + assert index_loc is not r.SCRATCH2 + # (simplified version of _apply_scale()) + if ofs_loc.value > 0: + self.mc.addi(r.SCRATCH2.value, index_loc.value, ofs_loc.value) + index_loc = r.SCRATCH2 + return index_loc - def _load_from_mem(self, res, base_loc, ofs, size, signed): + def emit_gc_store_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, value_loc, ofs_loc, size_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._write_to_mem(value_loc, base_loc, index_loc, size_loc) + + def _load_from_mem(self, res, base_loc, ofs, size_loc, sign_loc): # res, base_loc, ofs, size and signed are all locations assert base_loc is not r.SCRATCH - sign = signed.value - if size.value == 8: + assert size_loc.is_imm() + size = size_loc.value + assert sign_loc.is_imm() + sign = sign_loc.value + if size == 8: if res.is_fp_reg(): if ofs.is_imm(): self.mc.lfd(res.value, base_loc.value, ofs.value) @@ -758,7 +777,7 @@ self.mc.ld(res.value, base_loc.value, ofs.value) else: self.mc.ldx(res.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if IS_PPC_64 and sign: if ofs.is_imm(): self.mc.lwa(res.value, base_loc.value, ofs.value) @@ -769,7 +788,7 @@ self.mc.lwz(res.value, base_loc.value, ofs.value) else: self.mc.lwzx(res.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if sign: if ofs.is_imm(): self.mc.lha(res.value, base_loc.value, ofs.value) @@ -780,7 +799,7 @@ self.mc.lhz(res.value, base_loc.value, ofs.value) else: self.mc.lhzx(res.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.lbz(res.value, base_loc.value, ofs.value) else: @@ -790,22 +809,28 @@ else: assert 0, "size not supported" - def _genop_getfield(self, op, arglocs, regalloc): - base_loc, ofs, res, size, sign = arglocs - self._load_from_mem(res, base_loc, ofs, size, sign) + def _genop_gc_load(self, op, arglocs, regalloc): + base_loc, ofs_loc, res_loc, size_loc, sign_loc = arglocs + self._load_from_mem(res_loc, base_loc, ofs_loc, size_loc, sign_loc) - emit_getfield_gc_i = _genop_getfield - emit_getfield_gc_r = _genop_getfield - emit_getfield_gc_f = _genop_getfield - emit_getfield_gc_pure_i = _genop_getfield - emit_getfield_gc_pure_r = _genop_getfield - emit_getfield_gc_pure_f = _genop_getfield - emit_getfield_raw_i = _genop_getfield - emit_getfield_raw_f = _genop_getfield + emit_gc_load_i = _genop_gc_load + emit_gc_load_r = _genop_gc_load + emit_gc_load_f = _genop_gc_load + + def _genop_gc_load_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, res_loc, ofs_loc, size_loc, sign_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._load_from_mem(res_loc, base_loc, index_loc, size_loc, sign_loc) + + emit_gc_load_indexed_i = _genop_gc_load_indexed + emit_gc_load_indexed_r = _genop_gc_load_indexed + emit_gc_load_indexed_f = _genop_gc_load_indexed SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) def _multiply_by_constant(self, loc, multiply_by, scratch_loc): + # XXX should die together with _apply_scale() but can't because + # of emit_zero_array() and malloc_cond_varsize() at the moment assert loc.is_reg() if multiply_by == 1: return loc @@ -827,6 +852,9 @@ return scratch_loc def _apply_scale(self, ofs, index_loc, itemsize): + # XXX should die now that getarrayitem and getinteriorfield are gone + # but can't because of emit_zero_array() at the moment + # For arrayitem and interiorfield reads and writes: this returns an # offset 
suitable for use in ld/ldx or similar instructions. # The result will be either the register r2 or a 16-bit immediate. @@ -857,44 +885,6 @@ index_loc = r.SCRATCH2 return index_loc - def _genop_getarray_or_interiorfield(self, op, arglocs, regalloc): - (base_loc, index_loc, res_loc, ofs_loc, - itemsize, fieldsize, fieldsign) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._load_from_mem(res_loc, base_loc, ofs_loc, fieldsize, fieldsign) - - emit_getinteriorfield_gc_i = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_r = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_f = _genop_getarray_or_interiorfield - - def emit_setinteriorfield_gc(self, op, arglocs, regalloc): - (base_loc, index_loc, value_loc, ofs_loc, - itemsize, fieldsize) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._write_to_mem(value_loc, base_loc, ofs_loc, fieldsize) - - emit_setinteriorfield_raw = emit_setinteriorfield_gc - - def emit_arraylen_gc(self, op, arglocs, regalloc): - res, base_loc, ofs = arglocs - self.mc.load(res.value, base_loc.value, ofs.value) - - emit_setarrayitem_gc = emit_setinteriorfield_gc - emit_setarrayitem_raw = emit_setarrayitem_gc - - emit_getarrayitem_gc_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_f = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_f = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_i = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_f = _genop_getarray_or_interiorfield - - emit_raw_store = emit_setarrayitem_gc - emit_raw_load_i = _genop_getarray_or_interiorfield - emit_raw_load_f = _genop_getarray_or_interiorfield - def _copy_in_scratch2(self, loc): if loc.is_imm(): self.mc.li(r.SCRATCH2.value, loc.value) @@ -998,10 +988,6 @@ _mixin_ = True - emit_strlen = FieldOpAssembler._genop_getfield - emit_strgetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_strsetitem = FieldOpAssembler.emit_setarrayitem_gc - def emit_copystrcontent(self, op, arglocs, regalloc): self._emit_copycontent(arglocs, is_unicode=False) @@ -1059,12 +1045,8 @@ class UnicodeOpAssembler(object): - _mixin_ = True - - emit_unicodelen = FieldOpAssembler._genop_getfield - emit_unicodegetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_unicodesetitem = FieldOpAssembler.emit_setarrayitem_gc + # empty! 
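As a quick reference (a sketch pieced together from the test_rewrite.py changes included further down in this digest, not an authoritative spec): the descr-based memory operations are rewritten by the llsupport layer into explicit load/store operations before they reach the backend, roughly as follows, where the angle-bracketed values are filled in from the descr and the variable names are illustrative only:

    setfield_gc(p0, i1, descr=ydescr)
        -> gc_store(p0, <ydescr.offset>, i1, <ydescr.field_size>)
    i3 = getfield_gc_i(p0, descr=ydescr)
        -> i3 = gc_load_i(p0, <ydescr.offset>, <ydescr.field_size>)
    setarrayitem_gc(p1, i2, p3, descr=cdescr)
        -> gc_store_indexed(p1, i2, p3, <cdescr.itemsize>, <cdescr.basesize>, <cdescr.itemsize>)
    i4 = getarrayitem_gc_i(p1, i2, descr=cdescr)
        -> i4 = gc_load_indexed_i(p1, i2, <cdescr.itemsize>, <cdescr.basesize>, <cdescr.itemsize>)

The size argument is negative when the loaded value must be sign-extended, which is why the prepare_gc_load code below takes abs() of it and passes a separate sign flag to the assembler.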
class AllocOpAssembler(object): diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -17,12 +17,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import ArrayDescr +from rpython.jit.backend.llsupport.descr import unpack_arraydescr import rpython.jit.backend.ppc.register as r import rpython.jit.backend.ppc.condition as c -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_print @@ -691,159 +688,69 @@ src_locations2, dst_locations2, fptmploc) return [] - def prepare_setfield_gc(self, op): - ofs, size, _ = unpack_fielddescr(op.getdescr()) + def prepare_gc_store(self, op): base_loc = self.ensure_reg(op.getarg(0)) - value_loc = self.ensure_reg(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [value_loc, base_loc, ofs_loc, imm(size)] + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + size_loc = self.ensure_reg_or_any_imm(op.getarg(3)) + return [value_loc, base_loc, ofs_loc, size_loc] - prepare_setfield_raw = prepare_setfield_gc + def _prepare_gc_load(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + self.free_op_vars() + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(2) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, ofs_loc, res_loc, size_loc, imm(sign)] - def _prepare_getfield(self, op): - ofs, size, sign = unpack_fielddescr(op.getdescr()) + prepare_gc_load_i = _prepare_gc_load + prepare_gc_load_r = _prepare_gc_load + prepare_gc_load_f = _prepare_gc_load + + def prepare_gc_store_indexed(self, op): base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + assert op.getarg(3).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(4)) + assert ofs_loc.is_imm() # the arg(4) should always be a small constant + size_loc = self.ensure_reg_or_any_imm(op.getarg(5)) + return [base_loc, index_loc, value_loc, ofs_loc, size_loc] + + def _prepare_gc_load_indexed(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + assert op.getarg(2).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(3)) + assert ofs_loc.is_imm() # the arg(3) should always be a small constant self.free_op_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size), imm(sign)] + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(4) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, index_loc, res_loc, ofs_loc, size_loc, imm(sign)] - prepare_getfield_gc_i = 
_prepare_getfield - prepare_getfield_gc_r = _prepare_getfield - prepare_getfield_gc_f = _prepare_getfield - prepare_getfield_raw_i = _prepare_getfield - prepare_getfield_raw_f = _prepare_getfield - prepare_getfield_gc_pure_i = _prepare_getfield - prepare_getfield_gc_pure_r = _prepare_getfield - prepare_getfield_gc_pure_f = _prepare_getfield + prepare_gc_load_indexed_i = _prepare_gc_load_indexed + prepare_gc_load_indexed_r = _prepare_gc_load_indexed + prepare_gc_load_indexed_f = _prepare_gc_load_indexed def prepare_increment_debug_counter(self, op): base_loc = self.ensure_reg(op.getarg(0)) temp_loc = r.SCRATCH2 return [base_loc, temp_loc] - def _prepare_getinteriorfield(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(itemsize), imm(fieldsize), imm(sign)] - - prepare_getinteriorfield_gc_i = _prepare_getinteriorfield - prepare_getinteriorfield_gc_r = _prepare_getinteriorfield - prepare_getinteriorfield_gc_f = _prepare_getinteriorfield - - def prepare_setinteriorfield_gc(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, _ = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(itemsize), imm(fieldsize)] - - prepare_setinteriorfield_raw = prepare_setinteriorfield_gc - - def prepare_arraylen_gc(self, op): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - assert _check_imm_arg(ofs) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_setarrayitem_gc(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - imm_size = imm(size) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - - prepare_setarrayitem_raw = prepare_setarrayitem_gc - - def prepare_raw_store(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(1), imm(size)] - - def _prepare_getarrayitem(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(size) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(sign)] - - prepare_getarrayitem_gc_i = _prepare_getarrayitem - prepare_getarrayitem_gc_r = _prepare_getarrayitem - prepare_getarrayitem_gc_f = _prepare_getarrayitem - prepare_getarrayitem_raw_i = _prepare_getarrayitem - prepare_getarrayitem_raw_f = _prepare_getarrayitem - 
prepare_getarrayitem_gc_pure_i = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_r = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_f = _prepare_getarrayitem - - def _prepare_raw_load(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(1), imm(size), imm(sign)] - - prepare_raw_load_i = _prepare_raw_load - prepare_raw_load_f = _prepare_raw_load - - def prepare_strlen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_strgetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_strsetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - def prepare_copystrcontent(self, op): src_ptr_loc = self.ensure_reg(op.getarg(0)) dst_ptr_loc = self.ensure_reg(op.getarg(1)) @@ -856,37 +763,6 @@ prepare_copyunicodecontent = prepare_copystrcontent - def prepare_unicodelen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_unicodegetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_unicodesetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - prepare_same_as_i = helper.prepare_unary_op prepare_same_as_r = helper.prepare_unary_op prepare_same_as_f = helper.prepare_unary_op @@ -1078,12 +954,6 @@ arglocs = self._prepare_guard(op) return arglocs - def prepare_zero_ptr_field(self, op): - 
base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) - value_loc = self.ensure_reg(ConstInt(0)) - return [value_loc, base_loc, ofs_loc, imm(WORD)] - def prepare_zero_array(self, op): itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -21,6 +21,9 @@ IS_64_BIT = True backend_name = 'ppc64' + # can an ISA instruction handle a factor to the offset? + load_supported_factors = (1,) + from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE frame_reg = r.SP all_reg_indexes = [-1] * 32 diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -4,8 +4,7 @@ import os, sys from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, - unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) +from rpython.jit.backend.llsupport.descr import CallDescr, unpack_arraydescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op, @@ -1085,9 +1084,9 @@ result_loc = self.force_allocate_reg(op) size_box = op.getarg(2) assert isinstance(size_box, ConstInt) - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 @@ -1110,9 +1109,9 @@ assert isinstance(size_box, ConstInt) scale = scale_box.value offset = offset_box.value - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 From pypy.commits at gmail.com Mon Dec 28 21:27:38 2015 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 28 Dec 2015 18:27:38 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.11: merge default Message-ID: <5681ef9a.44e21c0a.94d15.ffffcff2@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r81462:d13d8d244813 Date: 2015-12-28 18:26 -0800 http://bitbucket.org/pypy/pypy/changeset/d13d8d244813/ Log: merge default diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -29,4 +29,4 @@ release/ !pypy/tool/release/ rpython/_cache/ -__pycache__/ +.cache/ diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.1 +Version: 1.4.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.1" -__version_info__ = (1, 4, 1) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,6 +44,9 @@ .. branch: fix-setslice-can-resize +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + .. branch: anntype2 A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: @@ -83,10 +86,18 @@ Trivial cleanups in flowspace.operation : fix comment & duplicated method .. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. + .. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + .. branch: cpyext-slotdefs .. branch: fix-missing-canraise +.. branch: whatsnew .. branch: fix-2211 -Fix the cryptic exception message when attempting to use extended slicing in rpython +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. diff --git a/pypy/module/_cffi_backend/cglob.py b/pypy/module/_cffi_backend/cglob.py --- a/pypy/module/_cffi_backend/cglob.py +++ b/pypy/module/_cffi_backend/cglob.py @@ -3,6 +3,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend import newtype +from rpython.rlib import rgil from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -26,7 +27,9 @@ if not we_are_translated(): FNPTR = rffi.CCallback([], rffi.VOIDP) fetch_addr = rffi.cast(FNPTR, self.fetch_addr) + rgil.release() result = fetch_addr() + rgil.acquire() else: # careful in translated versions: we need to call fetch_addr, # but in a GIL-releasing way. The easiest is to invoke a diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -423,7 +423,9 @@ exchange_offset += rffi.getintfield(self.atypes[i], 'c_size') # store the exchange data size - cif_descr.exchange_size = exchange_offset + # we also align it to the next multiple of 8, in an attempt to + # work around bugs(?) 
of libffi (see cffi issue #241) + cif_descr.exchange_size = self.align_arg(exchange_offset) def fb_extra_fields(self, cif_descr): cif_descr.abi = self.fabi diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,7 +4,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, + CANNOT_FAIL) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, @@ -380,6 +381,17 @@ space.call_function(delattr_fn, w_self, w_name) return 0 api_func = slot_tp_setattro.api_func + elif name == 'tp_getattro': + getattr_fn = w_type.getdictvalue(space, '__getattribute__') + if getattr_fn is None: + return + + @cpython_api([PyObject, PyObject], PyObject, + error=CANNOT_FAIL, external=True) + @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) + def slot_tp_getattro(space, w_self, w_name): + return space.call_function(getattr_fn, w_self, w_name) + api_func = slot_tp_getattro.api_func else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -385,12 +385,53 @@ PyErr_SetString(PyExc_ValueError, "recursive tp_setattro"); return NULL; } + if (!args->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + if (args->ob_type->tp_getattro == + args->ob_type->tp_base->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "recursive tp_getattro"); + return NULL; + } Py_RETURN_TRUE; ''' ) ]) assert module.test_type(type(None)) + def test_tp_getattro(self): + module = self.import_extension('foo', [ + ("test_tp_getattro", "METH_VARARGS", + ''' + PyObject *obj = PyTuple_GET_ITEM(args, 0); + PyIntObject *value = PyTuple_GET_ITEM(args, 1); + if (!obj->ob_type->tp_getattro) + { + PyErr_SetString(PyExc_ValueError, "missing tp_getattro"); + return NULL; + } + PyObject *name = PyString_FromString("attr1"); + PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); + if (attr1->ob_ival != value->ob_ival) + { + PyErr_SetString(PyExc_ValueError, + "tp_getattro returned wrong value"); + return NULL; + } + Py_DECREF(name); + Py_DECREF(attr1); + Py_RETURN_TRUE; + ''' + ) + ]) + class C: + def __init__(self): + self.attr1 = 123 + assert module.test_tp_getattro(C(), 123) + def test_nb_int(self): module = self.import_extension('foo', [ ("nb_int", "METH_O", diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -582,6 +582,8 @@ pto.c_tp_free = base.c_tp_free if not pto.c_tp_setattro: pto.c_tp_setattro = base.c_tp_setattro + if not pto.c_tp_getattro: + pto.c_tp_getattro = base.c_tp_getattro finally: Py_DecRef(space, base_pyo) @@ -651,6 +653,12 @@ PyObject_GenericSetAttr.api_func.functype, PyObject_GenericSetAttr.api_func.get_wrapper(space)) + if not pto.c_tp_getattro: + from pypy.module.cpyext.object import PyObject_GenericGetAttr + pto.c_tp_getattro = llhelper( + PyObject_GenericGetAttr.api_func.functype, + PyObject_GenericGetAttr.api_func.get_wrapper(space)) + if w_obj.is_cpytype(): Py_DecRef(space, pto.c_tp_dict) 
w_dict = w_obj.getdict(space) diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -100,6 +100,7 @@ self.argtypes = argtypes def __call__(self, funcdesc, inputcells): + from rpython.rlib.objectmodel import NOT_CONSTANT from rpython.rtyper.lltypesystem import lltype args_s = [] from rpython.annotator import model as annmodel @@ -115,6 +116,9 @@ args_s.append(s_input) elif argtype is None: args_s.append(inputcells[i]) # no change + elif argtype is NOT_CONSTANT: + from rpython.annotator.model import not_const + args_s.append(not_const(inputcells[i])) else: args_s.append(annotation(argtype, bookkeeper=funcdesc.bookkeeper)) if len(inputcells) != len(args_s): diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -804,7 +804,7 @@ base_loc = self.make_sure_var_in_reg(boxes[0], boxes) ofs = boxes[1].getint() value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - size = abs(boxes[3].getint()) + size = boxes[3].getint() ofs_size = default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -849,7 +849,7 @@ index_loc = self.make_sure_var_in_reg(boxes[1], boxes) assert boxes[3].getint() == 1 # scale ofs = boxes[4].getint() - size = abs(boxes[5].getint()) + size = boxes[5].getint() assert check_imm_arg(ofs) return [value_loc, base_loc, index_loc, imm(size), imm(ofs)] diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -126,11 +126,11 @@ def emit_gc_store_or_indexed(self, op, ptr_box, index_box, value_box, itemsize, factor, offset): factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(index_box, + self._emit_mul_if_factor_offset_not_supported(index_box, factor, offset) # - if factor == 1 and offset == 0: - args = [ptr_box, index_box, value_box, ConstInt(itemsize)] + if index_box is None: + args = [ptr_box, ConstInt(offset), value_box, ConstInt(itemsize)] newload = ResOperation(rop.GC_STORE, args) else: args = [ptr_box, index_box, value_box, ConstInt(factor), @@ -153,18 +153,15 @@ index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) - def _emit_mul_add_if_factor_offset_not_supported(self, index_box, factor, offset): - orig_factor = factor - # factor - must_manually_load_const = False # offset != 0 and not self.cpu.load_constant_offset - if factor != 1 and (factor not in self.cpu.load_supported_factors or \ - (not index_box.is_constant() and must_manually_load_const)): - # enter here if the factor is supported by the cpu - # OR the index is not constant and a new resop must be emitted - # to add the offset - if isinstance(index_box, ConstInt): - index_box = ConstInt(index_box.value * factor) - else: + def _emit_mul_if_factor_offset_not_supported(self, index_box, + factor, offset): + # Returns (factor, offset, index_box) where index_box is either + # a non-constant BoxInt or None. 
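To make the new contract concrete, a small worked example (the numbers and the i2/i3/i4/p0 names are made up for illustration; the behaviour is read off the code that follows):

    # constant index: everything is folded into the offset, no index operand is left
    #   _emit_mul_if_factor_offset_not_supported(ConstInt(3), factor=8, offset=16)
    #   -> (1, 3*8+16, None) == (1, 40, None)
    #   so the caller emits e.g.  i3 = gc_load_i(p0, 40, 8)
    # non-constant index i2: the index box is kept
    #   -> (8, 16, i2) when the cpu lists 8 in load_supported_factors
    #   -> (1, 16, i4) otherwise, after an explicit shift/multiply such as
    #      i4 = int_lshift(i2, 3) has been emitted in front of the load,
    #      and the caller emits e.g.  i3 = gc_load_indexed_i(p0, i4, 1, 16, 8)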
+ if isinstance(index_box, ConstInt): + return 1, index_box.value * factor + offset, None + else: + if factor != 1 and factor not in self.cpu.load_supported_factors: + # the factor is supported by the cpu # x & (x - 1) == 0 is a quick test for power of 2 assert factor > 0 if (factor & (factor - 1)) == 0: @@ -174,20 +171,13 @@ index_box = ResOperation(rop.INT_MUL, [index_box, ConstInt(factor)]) self.emit_op(index_box) - factor = 1 - # adjust the constant offset - #if must_manually_load_const: - # if isinstance(index_box, ConstInt): - # index_box = ConstInt(index_box.value + offset) - # else: - # index_box = ResOperation(rop.INT_ADD, [index_box, ConstInt(offset)]) - # self.emit_op(index_box) - # offset = 0 - return factor, offset, index_box + factor = 1 + return factor, offset, index_box - def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, factor, offset, sign, type='i'): + def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, + factor, offset, sign, type='i'): factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(index_box, + self._emit_mul_if_factor_offset_not_supported(index_box, factor, offset) # if sign: @@ -197,8 +187,8 @@ optype = type if op is not None: optype = op.type - if factor == 1 and offset == 0: - args = [ptr_box, index_box, ConstInt(itemsize)] + if index_box is None: + args = [ptr_box, ConstInt(offset), ConstInt(itemsize)] newload = ResOperation(OpHelpers.get_gc_load(optype), args) else: args = [ptr_box, index_box, ConstInt(factor), @@ -547,9 +537,8 @@ ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_depth) if sign: size = -size - args = [ConstInt(frame_info), ConstInt(0), ConstInt(1), - ConstInt(ofs), ConstInt(size)] - size = ResOperation(rop.GC_LOAD_INDEXED_I, args) + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = ResOperation(rop.NEW_ARRAY, [size], descr=descrs.arraydescr) @@ -560,9 +549,8 @@ ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_size) if sign: size = -size - args = [ConstInt(frame_info), ConstInt(0), ConstInt(1), - ConstInt(ofs), ConstInt(size)] - size = ResOperation(rop.GC_LOAD_INDEXED_I, args) + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = self.gen_malloc_nursery_varsize_frame(size) self.gen_initialize_tid(frame, descrs.arraydescr.tid) @@ -612,15 +600,12 @@ descr = self.cpu.getarraydescr_for_frame(arg.type) assert self.cpu.JITFRAME_FIXED_SIZE & 1 == 0 _, itemsize, _ = self.cpu.unpack_arraydescr_size(descr) - index = index_list[i] // itemsize # index is in bytes - # emit GC_LOAD_INDEXED - itemsize, basesize, _ = unpack_arraydescr(descr) - factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(ConstInt(index), - itemsize, basesize) - args = [frame, index_box, arg, ConstInt(factor), - ConstInt(offset), ConstInt(itemsize)] - self.emit_op(ResOperation(rop.GC_STORE_INDEXED, args)) + array_offset = index_list[i] # index, already measured in bytes + # emit GC_STORE + _, basesize, _ = unpack_arraydescr(descr) + offset = basesize + array_offset + args = [frame, ConstInt(offset), arg, ConstInt(itemsize)] + self.emit_op(ResOperation(rop.GC_STORE, args)) descr = op.getdescr() assert isinstance(descr, JitCellToken) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ 
b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -30,13 +30,26 @@ class RewriteTests(object): def check_rewrite(self, frm_operations, to_operations, **namespace): - def trans_getarray_to_load(descr): - size = descr.basesize - if descr.is_item_signed(): - size = -size - return ','.join([str(n) for n in [descr.itemsize, - descr.basesize, - size]]) + def setfield(baseptr, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + return 'gc_store(%s, %d, %s, %d)' % (baseptr, descr.offset, + newvalue, descr.field_size) + def setarrayitem(baseptr, index, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(index, (str, int)) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + if isinstance(index, int): + offset = descr.basesize + index * descr.itemsize + return 'gc_store(%s, %d, %s, %d)' % (baseptr, offset, + newvalue, descr.itemsize) + else: + return 'gc_store_indexed(%s, %s, %s, %d, %d, %s)' % ( + baseptr, index, newvalue, + descr.itemsize, descr.basesize, descr.itemsize) + # WORD = globals()['WORD'] S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) @@ -376,7 +389,7 @@ gc_store(p1, 0, 5678, 8) p2 = nursery_ptr_increment(p1, %(tdescr.size)d) gc_store(p2, 0, 1234, 8) - gc_store(p1, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) + %(setfield('p1', 0, tdescr.gc_fielddescrs[0]))s jump() """) @@ -485,7 +498,7 @@ """, """ [i0] p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr) - gc_store_indexed(p0, 0, i0, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) gc_store(p0, 0, 0, %(strlendescr.field_size)s) jump(i0) """) @@ -611,19 +624,19 @@ %(strdescr.basesize + 16 * strdescr.itemsize + \ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s) - gc_store_indexed(p0, 0, 14, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s) gc_store(p0, 0, 0, %(strhashdescr.field_size)s) p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s) - gc_store_indexed(p1, 0, 10, 1, %(unicodelendescr.offset)s, %(unicodelendescr.field_size)s) + gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s) gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s) p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) - gc_store_indexed(p2, 0, i2, 1, %(unicodelendescr.offset)s, %(unicodelendescr.field_size)s) + gc_store(p2, %(unicodelendescr.offset)s, i2, %(unicodelendescr.field_size)s) gc_store(p2, 0, 0, %(unicodehashdescr.field_size)s) p3 = call_malloc_nursery_varsize(1, 1, i2, \ descr=strdescr) - gc_store_indexed(p3, 0, i2, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p3, %(strlendescr.offset)s, i2, %(strlendescr.field_size)s) gc_store(p3, 0, 0, %(strhashdescr.field_size)s) jump() """) @@ -636,7 +649,7 @@ """, """ [p1, p2] cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, 0, p2, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p1, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump() """) @@ -650,7 +663,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', 
cdescr))s jump() """) @@ -671,7 +684,7 @@ zero_array(p1, 0, 129, descr=cdescr) call_n(123456) cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -693,7 +706,7 @@ zero_array(p1, 0, 130, descr=cdescr) call_n(123456) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -705,7 +718,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -725,7 +738,7 @@ zero_array(p1, 0, 5, descr=cdescr) label(p1, i2, p3) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -743,12 +756,12 @@ size = interiorzdescr.arraydescr.itemsize self.check_rewrite(""" [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + setinteriorfield_gc(p1, 7, p2, descr=interiorzdescr) jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb_array(p1, 0, descr=wbdescr) - gc_store_indexed(p1, 0, p2, %(scale)s, %(offset)s, %(size)s) + cond_call_gc_wb_array(p1, 7, descr=wbdescr) + gc_store(p1, %(offset + 7 * scale)s, p2, %(size)s) jump(p1, p2) """, interiorzdescr=interiorzdescr, scale=scale, offset=offset, size=size) @@ -763,7 +776,7 @@ [p1] p0 = call_malloc_nursery(%(tdescr.size)d) gc_store(p0, 0, 5678, %(tiddescr.field_size)s) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -781,7 +794,7 @@ p1 = nursery_ptr_increment(p0, %(tdescr.size)d) gc_store(p1, 0, 1234, %(tiddescr.field_size)s) # <<>> - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -798,7 +811,7 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - gc_store_indexed(p0, i2, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 'i2', 'p1', cdescr))s jump() """) @@ -816,8 +829,8 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 2, 3, descr=cdescr) - gc_store_indexed(p0, 1, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 0, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p1', cdescr))s + %(setarrayitem('p0', 0, 'p2', cdescr))s jump() """) @@ -835,8 +848,8 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 3, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 4, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s jump() """) @@ -855,9 +868,9 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 2, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -878,11 +891,11 @@ 
gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 5, 0, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 4, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 2, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s + %(setarrayitem('p0', 0, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -901,10 +914,10 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 0, 'p1', cdescr))s call_n(321321) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -923,10 +936,10 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 0, 'p1', cdescr))s label(p0, p2) cond_call_gc_wb_array(p0, 1, descr=wbdescr) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -955,7 +968,7 @@ gc_store(p0, 0, i3, %(blendescr.field_size)s) zero_array(p0, 0, i3, descr=bdescr) cond_call_gc_wb_array(p0, 0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(bdescr.basesize)s, 1) + %(setarrayitem('p0', 0, 'p1', bdescr))s jump() """) @@ -991,10 +1004,10 @@ gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) - gc_store_indexed(p1, 0, i0, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p1, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) gc_store(p1, 0, 0, %(strhashdescr.field_size)s) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -1012,7 +1025,7 @@ gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) label(p0, p1) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -1025,8 +1038,8 @@ """, """ [p0, p1, p2] cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) - gc_store_indexed(p0, 0, p2, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump(p1, p2, p0) """) @@ -1036,20 +1049,20 @@ i2 = call_assembler_i(i0, f0, descr=casmdescr) """, """ [i0, f0] - i1 = gc_load_indexed_i(ConstClass(frame_info), 0, 1, 1, %(jfi_frame_size.field_size)s) + i1 = gc_load_i(ConstClass(frame_info), %(jfi_frame_size.offset)s, %(jfi_frame_size.field_size)s) p1 = call_malloc_nursery_varsize_frame(i1) gc_store(p1, 0, 0, %(tiddescr.field_size)s) - i2 = gc_load_indexed_i(ConstClass(frame_info), 0, 1, 1, %(jfi_frame_depth.field_size)s) - gc_store_indexed(p1, 0, 0, 1, 1, 
%(jf_extra_stack_depth.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_savedata.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_force_descr.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_descr.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_guard_exc.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_forward.field_size)s) + i2 = gc_load_i(ConstClass(frame_info), %(jfi_frame_depth.offset)s, %(jfi_frame_depth.field_size)s) + %(setfield('p1', 0, jf_extra_stack_depth))s + %(setfield('p1', 'NULL', jf_savedata))s + %(setfield('p1', 'NULL', jf_force_descr))s + %(setfield('p1', 'NULL', jf_descr))s + %(setfield('p1', 'NULL', jf_guard_exc))s + %(setfield('p1', 'NULL', jf_forward))s gc_store(p1, 0, i2, %(framelendescr.field_size)s) - gc_store_indexed(p1, 0, ConstClass(frame_info), 1, 1, %(jf_frame_info.field_size)s) - gc_store_indexed(p1, 0, i0, 8, 3, 8) - gc_store_indexed(p1, 1, f0, 8, 5, 8) + %(setfield('p1', 'ConstClass(frame_info)', jf_frame_info))s + gc_store(p1, 3, i0, 8) + gc_store(p1, 13, f0, 8) i3 = call_assembler_i(p1, descr=casmdescr) """) @@ -1101,7 +1114,7 @@ p0 = call_malloc_nursery(%(tdescr.size)d) gc_store(p0, 0, 5678, %(tiddescr.field_size)s) gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) - p1 = gc_load_indexed_r(p0, 0, 1, %(tzdescr.field_size)s, %(tzdescr.field_size)s) + p1 = gc_load_r(p0, %(tzdescr.offset)s, %(tzdescr.field_size)s) jump(p1) """) @@ -1155,23 +1168,19 @@ # 'i5 = int_add(i1,%(raw_sfdescr.basesize)s);' # 'gc_store(p0,i5,i2,%(raw_sfdescr.itemsize)s)'], [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' - 'i3 = gc_load_indexed_f(p0,0,1,%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' - 'i3 = gc_load_indexed_f(p0,0,1,%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = setfield_raw(p0,i1,descr=ydescr)' '->' - 'i3 = gc_store_indexed(p0,0,i1,1,' - '%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = setfield_gc(p0,p0,descr=zdescr)' '->' + 'i3 = gc_load_f(p0,%(ydescr.offset)s,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_raw(p0,i1,descr=ydescr)' '->' + 'gc_store(p0,%(ydescr.offset)s,i1,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_gc(p0,p0,descr=zdescr)' '->' 'cond_call_gc_wb(p0, descr=wbdescr);' - 'i3 = gc_store_indexed(p0,0,p0,1,' - '%(zdescr.offset)s,%(zdescr.field_size)s)'], + 'gc_store(p0,%(zdescr.offset)s,p0,%(zdescr.field_size)s)'], [False, (1,), 'i3 = arraylen_gc(p0, descr=adescr)' '->' 'i3 = gc_load_i(p0,0,%(adescr.itemsize)s)'], #[False, (1,), 'i3 = strlen(p0)' '->' # 'i3 = gc_load_i(p0,' # '%(strlendescr.offset)s,%(strlendescr.field_size)s)'], [True, (1,), 'i3 = strlen(p0)' '->' - 'i3 = gc_load_indexed_i(p0,0,1,' + 'i3 = gc_load_i(p0,' '%(strlendescr.offset)s,' '%(strlendescr.field_size)s)'], #[False, (1,), 'i3 = unicodelen(p0)' '->' @@ -1179,7 +1188,7 @@ # '%(unicodelendescr.offset)s,' # '%(unicodelendescr.field_size)s)'], [True, (1,), 'i3 = unicodelen(p0)' '->' - 'i3 = gc_load_indexed_i(p0,0,1,' + 'i3 = gc_load_i(p0,' '%(unicodelendescr.offset)s,' '%(unicodelendescr.field_size)s)'], diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -20,7 +20,7 @@ PPCBuilder, PPCGuardToken) from rpython.jit.backend.ppc.regalloc import TempPtr, TempInt from rpython.jit.backend.llsupport import symbolic, jitframe -from 
rpython.jit.backend.llsupport.descr import InteriorFieldDescr, CallDescr +from rpython.jit.backend.llsupport.descr import CallDescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref @@ -706,8 +706,10 @@ _mixin_ = True - def _write_to_mem(self, value_loc, base_loc, ofs, size): - if size.value == 8: + def _write_to_mem(self, value_loc, base_loc, ofs, size_loc): + assert size_loc.is_imm() + size = size_loc.value + if size == 8: if value_loc.is_fp_reg(): if ofs.is_imm(): self.mc.stfd(value_loc.value, base_loc.value, ofs.value) @@ -718,17 +720,17 @@ self.mc.std(value_loc.value, base_loc.value, ofs.value) else: self.mc.stdx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if ofs.is_imm(): self.mc.stw(value_loc.value, base_loc.value, ofs.value) else: self.mc.stwx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if ofs.is_imm(): self.mc.sth(value_loc.value, base_loc.value, ofs.value) else: self.mc.sthx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.stb(value_loc.value, base_loc.value, ofs.value) else: @@ -736,18 +738,35 @@ else: assert 0, "size not supported" - def emit_setfield_gc(self, op, arglocs, regalloc): - value_loc, base_loc, ofs, size = arglocs - self._write_to_mem(value_loc, base_loc, ofs, size) + def emit_gc_store(self, op, arglocs, regalloc): + value_loc, base_loc, ofs_loc, size_loc = arglocs + self._write_to_mem(value_loc, base_loc, ofs_loc, size_loc) - emit_setfield_raw = emit_setfield_gc - emit_zero_ptr_field = emit_setfield_gc + def _apply_offset(self, index_loc, ofs_loc): + # If offset != 0 then we have to add it here. Note that + # mc.addi() would not be valid with operand r0. + assert ofs_loc.is_imm() # must be an immediate... 
+ assert _check_imm_arg(ofs_loc.getint()) # ...that fits 16 bits + assert index_loc is not r.SCRATCH2 + # (simplified version of _apply_scale()) + if ofs_loc.value > 0: + self.mc.addi(r.SCRATCH2.value, index_loc.value, ofs_loc.value) + index_loc = r.SCRATCH2 + return index_loc - def _load_from_mem(self, res, base_loc, ofs, size, signed): + def emit_gc_store_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, value_loc, ofs_loc, size_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._write_to_mem(value_loc, base_loc, index_loc, size_loc) + + def _load_from_mem(self, res, base_loc, ofs, size_loc, sign_loc): # res, base_loc, ofs, size and signed are all locations assert base_loc is not r.SCRATCH - sign = signed.value - if size.value == 8: + assert size_loc.is_imm() + size = size_loc.value + assert sign_loc.is_imm() + sign = sign_loc.value + if size == 8: if res.is_fp_reg(): if ofs.is_imm(): self.mc.lfd(res.value, base_loc.value, ofs.value) @@ -758,7 +777,7 @@ self.mc.ld(res.value, base_loc.value, ofs.value) else: self.mc.ldx(res.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if IS_PPC_64 and sign: if ofs.is_imm(): self.mc.lwa(res.value, base_loc.value, ofs.value) @@ -769,7 +788,7 @@ self.mc.lwz(res.value, base_loc.value, ofs.value) else: self.mc.lwzx(res.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if sign: if ofs.is_imm(): self.mc.lha(res.value, base_loc.value, ofs.value) @@ -780,7 +799,7 @@ self.mc.lhz(res.value, base_loc.value, ofs.value) else: self.mc.lhzx(res.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.lbz(res.value, base_loc.value, ofs.value) else: @@ -790,22 +809,28 @@ else: assert 0, "size not supported" - def _genop_getfield(self, op, arglocs, regalloc): - base_loc, ofs, res, size, sign = arglocs - self._load_from_mem(res, base_loc, ofs, size, sign) + def _genop_gc_load(self, op, arglocs, regalloc): + base_loc, ofs_loc, res_loc, size_loc, sign_loc = arglocs + self._load_from_mem(res_loc, base_loc, ofs_loc, size_loc, sign_loc) - emit_getfield_gc_i = _genop_getfield - emit_getfield_gc_r = _genop_getfield - emit_getfield_gc_f = _genop_getfield - emit_getfield_gc_pure_i = _genop_getfield - emit_getfield_gc_pure_r = _genop_getfield - emit_getfield_gc_pure_f = _genop_getfield - emit_getfield_raw_i = _genop_getfield - emit_getfield_raw_f = _genop_getfield + emit_gc_load_i = _genop_gc_load + emit_gc_load_r = _genop_gc_load + emit_gc_load_f = _genop_gc_load + + def _genop_gc_load_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, res_loc, ofs_loc, size_loc, sign_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._load_from_mem(res_loc, base_loc, index_loc, size_loc, sign_loc) + + emit_gc_load_indexed_i = _genop_gc_load_indexed + emit_gc_load_indexed_r = _genop_gc_load_indexed + emit_gc_load_indexed_f = _genop_gc_load_indexed SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) def _multiply_by_constant(self, loc, multiply_by, scratch_loc): + # XXX should die together with _apply_scale() but can't because + # of emit_zero_array() and malloc_cond_varsize() at the moment assert loc.is_reg() if multiply_by == 1: return loc @@ -827,6 +852,9 @@ return scratch_loc def _apply_scale(self, ofs, index_loc, itemsize): + # XXX should die now that getarrayitem and getinteriorfield are gone + # but can't because of emit_zero_array() at the moment + # For arrayitem and interiorfield reads and writes: this returns an # offset 
suitable for use in ld/ldx or similar instructions. # The result will be either the register r2 or a 16-bit immediate. @@ -857,44 +885,6 @@ index_loc = r.SCRATCH2 return index_loc - def _genop_getarray_or_interiorfield(self, op, arglocs, regalloc): - (base_loc, index_loc, res_loc, ofs_loc, - itemsize, fieldsize, fieldsign) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._load_from_mem(res_loc, base_loc, ofs_loc, fieldsize, fieldsign) - - emit_getinteriorfield_gc_i = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_r = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_f = _genop_getarray_or_interiorfield - - def emit_setinteriorfield_gc(self, op, arglocs, regalloc): - (base_loc, index_loc, value_loc, ofs_loc, - itemsize, fieldsize) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._write_to_mem(value_loc, base_loc, ofs_loc, fieldsize) - - emit_setinteriorfield_raw = emit_setinteriorfield_gc - - def emit_arraylen_gc(self, op, arglocs, regalloc): - res, base_loc, ofs = arglocs - self.mc.load(res.value, base_loc.value, ofs.value) - - emit_setarrayitem_gc = emit_setinteriorfield_gc - emit_setarrayitem_raw = emit_setarrayitem_gc - - emit_getarrayitem_gc_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_f = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_f = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_i = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_f = _genop_getarray_or_interiorfield - - emit_raw_store = emit_setarrayitem_gc - emit_raw_load_i = _genop_getarray_or_interiorfield - emit_raw_load_f = _genop_getarray_or_interiorfield - def _copy_in_scratch2(self, loc): if loc.is_imm(): self.mc.li(r.SCRATCH2.value, loc.value) @@ -998,10 +988,6 @@ _mixin_ = True - emit_strlen = FieldOpAssembler._genop_getfield - emit_strgetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_strsetitem = FieldOpAssembler.emit_setarrayitem_gc - def emit_copystrcontent(self, op, arglocs, regalloc): self._emit_copycontent(arglocs, is_unicode=False) @@ -1059,12 +1045,8 @@ class UnicodeOpAssembler(object): - _mixin_ = True - - emit_unicodelen = FieldOpAssembler._genop_getfield - emit_unicodegetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_unicodesetitem = FieldOpAssembler.emit_setarrayitem_gc + # empty! 
class AllocOpAssembler(object): diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -17,12 +17,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import ArrayDescr +from rpython.jit.backend.llsupport.descr import unpack_arraydescr import rpython.jit.backend.ppc.register as r import rpython.jit.backend.ppc.condition as c -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_print @@ -691,159 +688,69 @@ src_locations2, dst_locations2, fptmploc) return [] - def prepare_setfield_gc(self, op): - ofs, size, _ = unpack_fielddescr(op.getdescr()) + def prepare_gc_store(self, op): base_loc = self.ensure_reg(op.getarg(0)) - value_loc = self.ensure_reg(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [value_loc, base_loc, ofs_loc, imm(size)] + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + size_loc = self.ensure_reg_or_any_imm(op.getarg(3)) + return [value_loc, base_loc, ofs_loc, size_loc] - prepare_setfield_raw = prepare_setfield_gc + def _prepare_gc_load(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + self.free_op_vars() + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(2) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, ofs_loc, res_loc, size_loc, imm(sign)] - def _prepare_getfield(self, op): - ofs, size, sign = unpack_fielddescr(op.getdescr()) + prepare_gc_load_i = _prepare_gc_load + prepare_gc_load_r = _prepare_gc_load + prepare_gc_load_f = _prepare_gc_load + + def prepare_gc_store_indexed(self, op): base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + assert op.getarg(3).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(4)) + assert ofs_loc.is_imm() # the arg(4) should always be a small constant + size_loc = self.ensure_reg_or_any_imm(op.getarg(5)) + return [base_loc, index_loc, value_loc, ofs_loc, size_loc] + + def _prepare_gc_load_indexed(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + assert op.getarg(2).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(3)) + assert ofs_loc.is_imm() # the arg(3) should always be a small constant self.free_op_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size), imm(sign)] + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(4) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, index_loc, res_loc, ofs_loc, size_loc, imm(sign)] - prepare_getfield_gc_i = 
_prepare_getfield - prepare_getfield_gc_r = _prepare_getfield - prepare_getfield_gc_f = _prepare_getfield - prepare_getfield_raw_i = _prepare_getfield - prepare_getfield_raw_f = _prepare_getfield - prepare_getfield_gc_pure_i = _prepare_getfield - prepare_getfield_gc_pure_r = _prepare_getfield - prepare_getfield_gc_pure_f = _prepare_getfield + prepare_gc_load_indexed_i = _prepare_gc_load_indexed + prepare_gc_load_indexed_r = _prepare_gc_load_indexed + prepare_gc_load_indexed_f = _prepare_gc_load_indexed def prepare_increment_debug_counter(self, op): base_loc = self.ensure_reg(op.getarg(0)) temp_loc = r.SCRATCH2 return [base_loc, temp_loc] - def _prepare_getinteriorfield(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(itemsize), imm(fieldsize), imm(sign)] - - prepare_getinteriorfield_gc_i = _prepare_getinteriorfield - prepare_getinteriorfield_gc_r = _prepare_getinteriorfield - prepare_getinteriorfield_gc_f = _prepare_getinteriorfield - - def prepare_setinteriorfield_gc(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, _ = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(itemsize), imm(fieldsize)] - - prepare_setinteriorfield_raw = prepare_setinteriorfield_gc - - def prepare_arraylen_gc(self, op): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - assert _check_imm_arg(ofs) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_setarrayitem_gc(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - imm_size = imm(size) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - - prepare_setarrayitem_raw = prepare_setarrayitem_gc - - def prepare_raw_store(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(1), imm(size)] - - def _prepare_getarrayitem(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(size) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(sign)] - - prepare_getarrayitem_gc_i = _prepare_getarrayitem - prepare_getarrayitem_gc_r = _prepare_getarrayitem - prepare_getarrayitem_gc_f = _prepare_getarrayitem - prepare_getarrayitem_raw_i = _prepare_getarrayitem - prepare_getarrayitem_raw_f = _prepare_getarrayitem - 
prepare_getarrayitem_gc_pure_i = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_r = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_f = _prepare_getarrayitem - - def _prepare_raw_load(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(1), imm(size), imm(sign)] - - prepare_raw_load_i = _prepare_raw_load - prepare_raw_load_f = _prepare_raw_load - - def prepare_strlen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_strgetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_strsetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - def prepare_copystrcontent(self, op): src_ptr_loc = self.ensure_reg(op.getarg(0)) dst_ptr_loc = self.ensure_reg(op.getarg(1)) @@ -856,37 +763,6 @@ prepare_copyunicodecontent = prepare_copystrcontent - def prepare_unicodelen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_unicodegetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_unicodesetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - prepare_same_as_i = helper.prepare_unary_op prepare_same_as_r = helper.prepare_unary_op prepare_same_as_f = helper.prepare_unary_op @@ -1078,12 +954,6 @@ arglocs = self._prepare_guard(op) return arglocs - def prepare_zero_ptr_field(self, op): - 
base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) - value_loc = self.ensure_reg(ConstInt(0)) - return [value_loc, base_loc, ofs_loc, imm(WORD)] - def prepare_zero_array(self, op): itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -21,6 +21,9 @@ IS_64_BIT = True backend_name = 'ppc64' + # can an ISA instruction handle a factor to the offset? + load_supported_factors = (1,) + from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE frame_reg = r.SP all_reg_indexes = [-1] * 32 diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -4,8 +4,7 @@ import os, sys from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, - unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) +from rpython.jit.backend.llsupport.descr import CallDescr, unpack_arraydescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op, @@ -1039,7 +1038,8 @@ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) size_box = op.getarg(3) assert isinstance(size_box, ConstInt) - size = abs(size_box.value) + size = size_box.value + assert size >= 1 if size == 1: need_lower_byte = True else: @@ -1061,7 +1061,8 @@ assert isinstance(size_box, ConstInt) factor = scale_box.value offset = offset_box.value - size = abs(size_box.value) + size = size_box.value + assert size >= 1 if size == 1: need_lower_byte = True else: @@ -1083,9 +1084,9 @@ result_loc = self.force_allocate_reg(op) size_box = op.getarg(2) assert isinstance(size_box, ConstInt) - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 @@ -1108,9 +1109,9 @@ assert isinstance(size_box, ConstInt) scale = scale_box.value offset = offset_box.value - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -68,8 +68,8 @@ return box.value def repr_rpython(box, typechars): - return '%s/%s%d' % (box._get_hash_(), typechars, - compute_unique_id(box)) + return '%s/%s' % (box._get_hash_(), typechars, + ) #compute_unique_id(box)) class XxxAbstractValue(object): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1204,8 +1204,12 @@ '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- # same paramters as GC_LOAD, but one additional for the value to store - # note that the itemsize is not signed! 
+ # note that the itemsize is not signed (always > 0) # (gcptr, index, value, [scale, base_offset,] itemsize) + # invariants for GC_STORE: index is constant, but can be large + # invariants for GC_STORE_INDEXED: index is a non-constant box; + # scale is a constant; + # base_offset is a small constant 'GC_STORE/4d/n', 'GC_STORE_INDEXED/6d/n', diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -114,6 +114,8 @@ specialize = _Specialize() +NOT_CONSTANT = object() # to use in enforceargs() + def enforceargs(*types_, **kwds): """ Decorate a function with forcing of RPython-level types on arguments. None means no enforcing. diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -9,7 +9,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.rarithmetic import intmask, widen from rpython.rlib.objectmodel import ( - specialize, enforceargs, register_replacement_for) + specialize, enforceargs, register_replacement_for, NOT_CONSTANT) from rpython.rlib.signature import signature from rpython.rlib import types from rpython.annotator.model import s_Str0 @@ -415,7 +415,7 @@ @replace_os_function('open') @specialize.argtype(0) - at enforceargs(None, int, int, typecheck=False) + at enforceargs(NOT_CONSTANT, int, int, typecheck=False) def open(path, flags, mode): if _prefer_unicode(path): fd = c_wopen(_as_unicode0(path), flags, mode) diff --git a/rpython/rlib/rurandom.py b/rpython/rlib/rurandom.py --- a/rpython/rlib/rurandom.py +++ b/rpython/rlib/rurandom.py @@ -86,27 +86,29 @@ else: # Posix implementation def init_urandom(): """NOT_RPYTHON - Return an array of one int, initialized to 0. - It is filled automatically the first time urandom() is called. """ - return lltype.malloc(rffi.CArray(lltype.Signed), 1, - immortal=True, zero=True) + return None def urandom(context, n): "Read n bytes from /dev/urandom." result = '' if n == 0: return result - if not context[0]: - context[0] = os.open("/dev/urandom", os.O_RDONLY, 0777) - while n > 0: - try: - data = os.read(context[0], n) - except OSError, e: - if e.errno != errno.EINTR: - raise - data = '' - result += data - n -= len(data) + # XXX should somehow cache the file descriptor. It's a mess. + # CPython has a 99% solution and hopes for the remaining 1% + # not to occur. For now, we just don't cache the file + # descriptor (any more... 6810f401d08e). 
+ fd = os.open("/dev/urandom", os.O_RDONLY, 0777) + try: + while n > 0: + try: + data = os.read(fd, n) + except OSError, e: + if e.errno != errno.EINTR: + raise + data = '' + result += data + n -= len(data) + finally: + os.close(fd) return result - diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -4,7 +4,7 @@ r_dict, UnboxedValue, Symbolic, compute_hash, compute_identity_hash, compute_unique_id, current_object_addr_as_int, we_are_translated, prepare_dict_update, reversed_dict, specialize, enforceargs, newlist_hint, - resizelist_hint, is_annotation_constant, always_inline, + resizelist_hint, is_annotation_constant, always_inline, NOT_CONSTANT, iterkeys_with_hash, iteritems_with_hash, contains_with_hash, setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin) from rpython.translator.translator import TranslationContext, graphof @@ -529,6 +529,18 @@ TYPES = [v.concretetype for v in graph.getargs()] assert TYPES == [lltype.Signed, lltype.Float] +def test_enforceargs_not_constant(): + from rpython.translator.translator import TranslationContext, graphof + @enforceargs(NOT_CONSTANT) + def f(a): + return a + def f42(): + return f(42) + t = TranslationContext() + a = t.buildannotator() + s = a.build_types(f42, []) + assert not hasattr(s, 'const') + def getgraph(f, argtypes): from rpython.translator.translator import TranslationContext, graphof diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -9,6 +9,8 @@ from subprocess import PIPE, Popen def run_subprocess(executable, args, env=None, cwd=None): + if isinstance(args, list): + args = [a.encode('latin1') for a in args] return _run(executable, args, env, cwd) shell_default = False From pypy.commits at gmail.com Mon Dec 28 23:29:03 2015 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 28 Dec 2015 20:29:03 -0800 (PST) Subject: [pypy-commit] pypy stdlib-2.7.11: bytes.__alloc__ is an impl detail Message-ID: <56820c0f.6a69c20a.d649d.5e68@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r81463:dc7fb43b5d37 Date: 2015-12-28 20:28 -0800 http://bitbucket.org/pypy/pypy/changeset/dc7fb43b5d37/ Log: bytes.__alloc__ is an impl detail diff --git a/lib-python/2.7/test/test_bytes.py b/lib-python/2.7/test/test_bytes.py --- a/lib-python/2.7/test/test_bytes.py +++ b/lib-python/2.7/test/test_bytes.py @@ -727,6 +727,7 @@ if alloc not in seq: seq.append(alloc) + @test.test_support.impl_detail("undocumented bytes.__alloc__()") def test_init_alloc(self): b = bytearray() def g(): From pypy.commits at gmail.com Tue Dec 29 08:06:36 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:36 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: new branch Message-ID: <5682855c.ca061c0a.85ab3.07b9@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81464:af3b098d0960 Date: 2015-12-19 09:46 +0100 http://bitbucket.org/pypy/pypy/changeset/af3b098d0960/ Log: new branch From pypy.commits at gmail.com Tue Dec 29 08:06:38 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:38 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Add a failing test for issue #1674 Message-ID: <5682855e.a85fc20a.99e77.11a8@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81465:ed761ed18fa8 Date: 2015-12-19 11:33 +0100 
http://bitbucket.org/pypy/pypy/changeset/ed761ed18fa8/ Log: Add a failing test for issue #1674 diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3251,6 +3251,16 @@ assert array(x, copy=False) is x assert array(x, copy=True) is not x + def test_issue_1674(self): + import numpy as np + data = np.arange(15).reshape(3, 5) * 1.0 + # array([[ 0., 1., 2., 3., 4.], + # [ 5., 6., 7., 8., 9.], + # [ 10., 11., 12., 13., 14.]]) + m = data[:, 0] % 2 == 0 + # array([ True, False, True], dtype=bool) + assert data[m, 0] == array([ 0., 10.]) + def test_ravel(self): from numpy import arange assert (arange(3).ravel() == arange(3)).all() From pypy.commits at gmail.com Tue Dec 29 08:06:40 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:40 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Minimize importing Message-ID: <56828560.41dfc20a.752aa.1b7a@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81466:ab0bd18d4930 Date: 2015-12-19 11:35 +0100 http://bitbucket.org/pypy/pypy/changeset/ab0bd18d4930/ Log: Minimize importing diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3252,8 +3252,8 @@ assert array(x, copy=True) is not x def test_issue_1674(self): - import numpy as np - data = np.arange(15).reshape(3, 5) * 1.0 + from numpy import arange + data = arange(15).reshape(3, 5) * 1.0 # array([[ 0., 1., 2., 3., 4.], # [ 5., 6., 7., 8., 9.], # [ 10., 11., 12., 13., 14.]]) From pypy.commits at gmail.com Tue Dec 29 08:06:42 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:42 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Explain test Message-ID: <56828562.4a5ec20a.af75e.fffff2aa@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81467:83a721471393 Date: 2015-12-19 11:45 +0100 http://bitbucket.org/pypy/pypy/changeset/83a721471393/ Log: Explain test diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3252,6 +3252,7 @@ assert array(x, copy=True) is not x def test_issue_1674(self): + # Mask indexing does not work with a second dimension from numpy import arange data = arange(15).reshape(3, 5) * 1.0 # array([[ 0., 1., 2., 3., 4.], From pypy.commits at gmail.com Tue Dec 29 08:06:47 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:47 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Remove duplicated code (probably mismerged) Message-ID: <56828567.87591c0a.d290e.fffffc6d@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81470:c3da9df0e2ae Date: 2015-12-19 13:19 +0100 http://bitbucket.org/pypy/pypy/changeset/c3da9df0e2ae/ Log: Remove duplicated code (probably mismerged) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -806,7 +806,6 @@ indexlen = len(indexes_w) dtype = arr.get_dtype() shape_iter = PureShapeIter(iter_shape, indexes_w) - indexlen = len(indexes_w) while not shape_iter.done(): getitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen, dtype=dtype, prefixlen=prefixlen) From pypy.commits 
at gmail.com Tue Dec 29 08:06:43 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:43 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Add missing import Message-ID: <56828563.41dfc20a.752aa.1b85@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81468:ca41c0fd4a7e Date: 2015-12-19 12:03 +0100 http://bitbucket.org/pypy/pypy/changeset/ca41c0fd4a7e/ Log: Add missing import diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3253,7 +3253,7 @@ def test_issue_1674(self): # Mask indexing does not work with a second dimension - from numpy import arange + from numpy import arange, array data = arange(15).reshape(3, 5) * 1.0 # array([[ 0., 1., 2., 3., 4.], # [ 5., 6., 7., 8., 9.], From pypy.commits at gmail.com Tue Dec 29 08:06:51 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:51 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Document branch Message-ID: <5682856b.85e41c0a.a3d56.5588@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81472:27cc5b303584 Date: 2015-12-20 11:09 +0100 http://bitbucket.org/pypy/pypy/changeset/27cc5b303584/ Log: Document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -68,6 +68,10 @@ Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the commonly known loading operations +.. branch: fix-1674 + +Fix for broken mask indexing / selection. Was issue #1674. + .. branch: more-rposix Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and From pypy.commits at gmail.com Tue Dec 29 08:06:45 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:45 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Do not use python iter() builtin as variable name Message-ID: <56828565.e935c20a.161dc.fffff23e@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81469:4d65b43565ad Date: 2015-12-19 13:17 +0100 http://bitbucket.org/pypy/pypy/changeset/4d65b43565ad/ Log: Do not use python iter() builtin as variable name diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -805,22 +805,22 @@ prefixlen = len(prefix_w) indexlen = len(indexes_w) dtype = arr.get_dtype() - iter = PureShapeIter(iter_shape, indexes_w) + shape_iter = PureShapeIter(iter_shape, indexes_w) indexlen = len(indexes_w) - while not iter.done(): + while not shape_iter.done(): getitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen, dtype=dtype, prefixlen=prefixlen) # prepare the index index_w = [None] * indexlen for i in range(indexlen): - if iter.idx_w_i[i] is not None: - index_w[i] = iter.idx_w_i[i].getitem(iter.idx_w_s[i]) + if shape_iter.idx_w_i[i] is not None: + index_w[i] = shape_iter.idx_w_i[i].getitem(shape_iter.idx_w_s[i]) else: index_w[i] = indexes_w[i] res.descr_setitem(space, space.newtuple(prefix_w[:prefixlen] + - iter.get_index(space, shapelen)), + shape_iter.get_index(space, shapelen)), arr.descr_getitem(space, space.newtuple(index_w))) - iter.next() + shape_iter.next() return res setitem_int_driver = jit.JitDriver(name = 'numpy_setitem_int', From pypy.commits at gmail.com Tue Dec 29 08:06:52 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 
2015 05:06:52 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Remove unused variable Message-ID: <5682856c.d69c1c0a.77c16.ffffc5c5@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81473:db42d5d777fb Date: 2015-12-20 11:45 +0100 http://bitbucket.org/pypy/pypy/changeset/db42d5d777fb/ Log: Remove unused variable diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -256,7 +256,6 @@ elif space.is_w(w_idx, space.w_None): return [NewAxisChunk(), EllipsisChunk()] result = [] - i = 0 has_ellipsis = False for w_item in space.fixedview(w_idx): if space.is_w(w_item, space.w_Ellipsis): @@ -271,10 +270,8 @@ result.append(NewAxisChunk()) elif space.isinstance_w(w_item, space.w_slice): result.append(SliceChunk(w_item)) - i += 1 else: result.append(IntegerChunk(w_item)) - i += 1 if not has_ellipsis: result.append(EllipsisChunk()) return result From pypy.commits at gmail.com Tue Dec 29 08:06:49 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:49 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Enhance comment about mask indexing Message-ID: <56828569.c4b1c20a.b3ce9.0b19@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81471:439ca3544c0c Date: 2015-12-19 13:21 +0100 http://bitbucket.org/pypy/pypy/changeset/439ca3544c0c/ Log: Enhance comment about mask indexing diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -684,7 +684,8 @@ arr_iter, arr_state = arr.create_iter() arr_dtype = arr.get_dtype() index_dtype = index.get_dtype() - # support the deprecated form where arr([True]) will return arr[0, ...] + # support the deprecated form where + # arr[[True, False, True, ...]] will return arr[0, 2, ...] 
# by iterating over res_iter, not index_iter while not res_iter.done(res_state): getitem_filter_driver.jit_merge_point(shapelen=shapelen, From pypy.commits at gmail.com Tue Dec 29 08:06:54 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:54 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Add test case: dual indexing mask+selection in the other order Message-ID: <5682856e.42b81c0a.90f79.fffff0ca@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81474:2d9b651eafea Date: 2015-12-20 13:22 +0100 http://bitbucket.org/pypy/pypy/changeset/2d9b651eafea/ Log: Add test case: dual indexing mask+selection in the other order diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3261,6 +3261,8 @@ m = data[:, 0] % 2 == 0 # array([ True, False, True], dtype=bool) assert data[m, 0] == array([ 0., 10.]) + # Assume False for missing elements of the bool index array + assert data[0, m] == array([ 0., 2.]) def test_ravel(self): from numpy import arange From pypy.commits at gmail.com Tue Dec 29 08:06:58 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:58 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Add test case: dual indexing integer array+selection Message-ID: <56828572.87c21c0a.faf38.fffff85c@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81476:13b371329caa Date: 2015-12-20 23:11 +0100 http://bitbucket.org/pypy/pypy/changeset/13b371329caa/ Log: Add test case: dual indexing integer array+selection diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3264,6 +3264,21 @@ # Assume False for missing elements of the bool index array assert data[0, m] == array([ 0., 2.]) + def test_dual_indexing_selecting(self): + from numpy import arange, array + data = arange(15).reshape(5, 3) * 1.0 + assert (data == array([[ 0., 1., 2.], + [ 3., 4., 5.], + [ 6., 7., 8.], + [ 9., 10., 11.], + [ 12., 13., 14.]])).all() + m = array([3, 4, 1]) + assert (data[m] == array([[ 9., 10., 11.], + [ 12., 13., 14.], + [ 3., 4., 5.]])).all() + assert data[m, 0] == array([ 9., 12., 3.]) + assert data[array([1,3,4,1]), 1] == array([4., 10., 13., 4.]) + def test_ravel(self): from numpy import arange assert (arange(3).ravel() == arange(3)).all() From pypy.commits at gmail.com Tue Dec 29 08:07:00 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:00 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Fix test use .all() Message-ID: <56828574.aa5dc20a.74c0.ffffc26e@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81477:cd0a7e918cd4 Date: 2015-12-20 23:42 +0100 http://bitbucket.org/pypy/pypy/changeset/cd0a7e918cd4/ Log: Fix test use .all() diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3276,8 +3276,8 @@ assert (data[m] == array([[ 9., 10., 11.], [ 12., 13., 14.], [ 3., 4., 5.]])).all() - assert data[m, 0] == array([ 9., 12., 3.]) - assert data[array([1,3,4,1]), 1] == array([4., 10., 13., 4.]) + assert (data[m, 0] == array([ 9., 12., 3.])).all() + assert (data[array([1, 3, 4, 1]), 1] == array([4., 10., 13., 4.])).all() def 
test_ravel(self): from numpy import arange From pypy.commits at gmail.com Tue Dec 29 08:06:56 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:06:56 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Assert partial data correcteness during test Message-ID: <56828570.95151c0a.96895.ffffef0d@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81475:6e03bbc50405 Date: 2015-12-20 15:19 +0100 http://bitbucket.org/pypy/pypy/changeset/6e03bbc50405/ Log: Assert partial data correcteness during test diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3255,11 +3255,11 @@ # Mask indexing does not work with a second dimension from numpy import arange, array data = arange(15).reshape(3, 5) * 1.0 - # array([[ 0., 1., 2., 3., 4.], - # [ 5., 6., 7., 8., 9.], - # [ 10., 11., 12., 13., 14.]]) + assert (data == array([[ 0., 1., 2., 3., 4.], + [ 5., 6., 7., 8., 9.], + [ 10., 11., 12., 13., 14.]])).all() m = data[:, 0] % 2 == 0 - # array([ True, False, True], dtype=bool) + assert (m == array([ True, False, True], dtype=bool)).all() assert data[m, 0] == array([ 0., 10.]) # Assume False for missing elements of the bool index array assert data[0, m] == array([ 0., 2.]) From pypy.commits at gmail.com Tue Dec 29 08:07:04 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:04 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Move array indexing + slice into its own test Message-ID: <56828578.247bc20a.5fe7e.fffff4a2@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81479:5cc3ef540d5d Date: 2015-12-23 15:29 +0100 http://bitbucket.org/pypy/pypy/changeset/5cc3ef540d5d/ Log: Move array indexing + slice into its own test diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3405,8 +3405,7 @@ from numpy import array assert (array([])[[]] == []).all() a = array([[1, 2], [3, 4], [5, 6]]) - assert (a[slice(0, 3), [0, 0]] == [[1, 1], [3, 3], [5, 5]]).all() - assert (a[array([0, 2]), slice(0, 2)] == [[1, 2], [5, 6]]).all() + b = a[array([0, 0])] assert (b == [[1, 2], [1, 2]]).all() assert (a[[[0, 1], [0, 0]]] == array([1, 3])).all() @@ -3414,6 +3413,12 @@ assert (a[array([0, 2]), 1] == [2, 6]).all() assert (a[array([0, 2]), array([1])] == [2, 6]).all() + def test_int_array_index_n_slice(self): + from numpy import array + a = array([[1, 2], [3, 4], [5, 6]]) + assert (a[slice(0, 3), [0, 0]] == [[1, 1], [3, 3], [5, 5]]).all() + assert (a[array([0, 2]), slice(0, 2)] == [[1, 2], [5, 6]]).all() + def test_int_array_index_setitem(self): from numpy import array a = array([[1, 2], [3, 4], [5, 6]]) From pypy.commits at gmail.com Tue Dec 29 08:07:05 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:05 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Make test look consistent with other ones Message-ID: <56828579.17941c0a.1c63a.4bd7@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81480:a9d3428f8847 Date: 2015-12-23 15:30 +0100 http://bitbucket.org/pypy/pypy/changeset/a9d3428f8847/ Log: Make test look consistent with other ones diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ 
b/pypy/module/micronumpy/test/test_ndarray.py @@ -3406,8 +3406,7 @@ assert (array([])[[]] == []).all() a = array([[1, 2], [3, 4], [5, 6]]) - b = a[array([0, 0])] - assert (b == [[1, 2], [1, 2]]).all() + assert (a[array([0, 0])] == [[1, 2], [1, 2]]).all() assert (a[[[0, 1], [0, 0]]] == array([1, 3])).all() assert (a[array([0, 2])] == [[1, 2], [5, 6]]).all() assert (a[array([0, 2]), 1] == [2, 6]).all() From pypy.commits at gmail.com Tue Dec 29 08:07:02 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:02 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Fix test: use .all() Message-ID: <56828576.a89cc20a.e06c.ffff9b16@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81478:1ce0385fef52 Date: 2015-12-22 19:38 +0100 http://bitbucket.org/pypy/pypy/changeset/1ce0385fef52/ Log: Fix test: use .all() diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3252,7 +3252,7 @@ assert array(x, copy=True) is not x def test_issue_1674(self): - # Mask indexing does not work with a second dimension + # Mask indexing does not work with a second index from numpy import arange, array data = arange(15).reshape(3, 5) * 1.0 assert (data == array([[ 0., 1., 2., 3., 4.], @@ -3260,9 +3260,11 @@ [ 10., 11., 12., 13., 14.]])).all() m = data[:, 0] % 2 == 0 assert (m == array([ True, False, True], dtype=bool)).all() - assert data[m, 0] == array([ 0., 10.]) + assert (data[m] == array([[ 0., 1., 2., 3., 4.], + [ 10., 11., 12., 13., 14.]])).all() + assert (data[m, 0] == array([ 0., 10.])).all() # Assume False for missing elements of the bool index array - assert data[0, m] == array([ 0., 2.]) + assert (data[0, m] == array([ 0., 2.])).all() def test_dual_indexing_selecting(self): from numpy import arange, array From pypy.commits at gmail.com Tue Dec 29 08:07:07 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:07 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Add more test cases Message-ID: <5682857b.2467c20a.aad21.ffffeaf5@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81481:905da6af2d4c Date: 2015-12-23 15:36 +0100 http://bitbucket.org/pypy/pypy/changeset/905da6af2d4c/ Log: Add more test cases diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3406,8 +3406,14 @@ assert (array([])[[]] == []).all() a = array([[1, 2], [3, 4], [5, 6]]) + assert (a[[0, 0]] == [[1, 2], [1, 2]]).all() assert (a[array([0, 0])] == [[1, 2], [1, 2]]).all() + assert (a[array([0, 0]), 0] == [[1, 1]]).all() + assert (a[array([0, 0]), [0]] == [[1, 1]]).all() + assert (a[array([0, 0]), array([0])] == [[1, 1]]).all() + assert (a[[[0, 1], [0, 0]]] == array([1, 3])).all() + assert (a[array([0, 2])] == [[1, 2], [5, 6]]).all() assert (a[array([0, 2]), 1] == [2, 6]).all() assert (a[array([0, 2]), array([1])] == [2, 6]).all() From pypy.commits at gmail.com Tue Dec 29 08:07:09 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:09 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Add more test cases Message-ID: <5682857d.863f1c0a.cca08.29ef@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81482:df763fde4b86 Date: 2015-12-23 16:07 +0100 http://bitbucket.org/pypy/pypy/changeset/df763fde4b86/ Log: Add more test 
cases diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3263,8 +3263,12 @@ assert (data[m] == array([[ 0., 1., 2., 3., 4.], [ 10., 11., 12., 13., 14.]])).all() assert (data[m, 0] == array([ 0., 10.])).all() + assert (data[m, 0:1] == array([[0.], [10.]])).all() + assert (data[m, 0:1] == array([[0.], [10.]])).all() + assert (data[..., m, 0:1] == array([[0.], [10.]])).all() # Assume False for missing elements of the bool index array assert (data[0, m] == array([ 0., 2.])).all() + assert (data[0, m, None] == array([[0.], [2.]])).all() def test_dual_indexing_selecting(self): from numpy import arange, array From pypy.commits at gmail.com Tue Dec 29 08:07:10 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:10 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Add more test cases from numpy doc Message-ID: <5682857e.53ad1c0a.cd9fe.ffff8791@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81483:bd0b3e017721 Date: 2015-12-23 17:45 +0100 http://bitbucket.org/pypy/pypy/changeset/bd0b3e017721/ Log: Add more test cases from numpy doc 1 fail 1 pass diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3285,6 +3285,40 @@ assert (data[m, 0] == array([ 9., 12., 3.])).all() assert (data[array([1, 3, 4, 1]), 1] == array([4., 10., 13., 4.])).all() + def test_multiple_advanced_integer_indexing(self): + # Taken from: + # http://docs.scipy.org/doc/numpy-1.10.0/reference/arrays.indexing.html#advanced-indexing + from numpy import array, intp, arange + x = arange(12).reshape(4, 3) + assert (x == array([[ 0, 1, 2], + [ 3, 4, 5], + [ 6, 7, 8], + [ 9, 10, 11]])).all() + rows = array([[0, 0], + [3, 3]], dtype=intp) + columns = array([[0, 2], + [0, 2]], dtype=intp) + assert (x[(rows, columns)] == array([[ 0, 2], [ 9, 11]])).all() + assert (x[1:2, 1:3] == array([[4, 5]])).all() + assert (x[1:2, [1, 2]] == array([[4, 5]])).all() + + def test_multiple_advanced_indexing(self): + # Taken from: + # http://docs.scipy.org/doc/numpy-1.10.0/reference/arrays.indexing.html#advanced-indexing + from numpy import array, newaxis + x = array([[ 0, 1, 2], + [ 3, 4, 5], + [ 6, 7, 8], + [ 9, 10, 11]]) + rows = ((x.sum(-1) % 2) == 0) + assert (rows == array([False, True, False, True], dtype=bool)).all() + rows = rows.nonzero()[0] + assert (rows == array([1, 3])).all() + columns = [0, 2] + assert (x[rows[:, newaxis], columns] == array([[ 3, 5], [ 9, 11]])).all() + columns = array([0, 2]) + assert (x[rows[:, newaxis], columns] == array([[ 3, 5], [ 9, 11]])).all() + def test_ravel(self): from numpy import arange assert (arange(3).ravel() == arange(3)).all() From pypy.commits at gmail.com Tue Dec 29 08:07:14 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:14 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: typo: missing char Message-ID: <56828582.6918c20a.deb73.57ae@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81485:178b35c5cbc0 Date: 2015-12-24 12:28 +0100 http://bitbucket.org/pypy/pypy/changeset/178b35c5cbc0/ Log: typo: missing char diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -199,7 +199,7 @@ reds='auto') def 
call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args): - # out must hav been built. func needs no calc_type, is usually an + # out must have been built. func needs no calc_type, is usually an # external ufunc nin = len(in_args) in_iters = [None] * nin From pypy.commits at gmail.com Tue Dec 29 08:07:16 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:16 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Add a failing test for issue 1717 Message-ID: <56828584.6918c20a.deb73.57b0@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81486:0c6658a532bc Date: 2015-12-28 12:40 +0100 http://bitbucket.org/pypy/pypy/changeset/0c6658a532bc/ Log: Add a failing test for issue 1717 diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3321,6 +3321,13 @@ columns = array([0, 2]) assert (x[rows[:, newaxis], columns] == array([[ 3, 5], [ 9, 11]])).all() + def test_issue_1717(self): + from numpy import array + a = array((1, 5)).reshape(2,1) + assert (a == array([[1], [5]])).all() + a[0, array([True], dtype=bool)] = 100 + assert (a == array([[100], [5]])).all() + def test_ravel(self): from numpy import arange assert (arange(3).ravel() == arange(3)).all() From pypy.commits at gmail.com Tue Dec 29 08:07:18 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:18 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Better comment for array filtering Message-ID: <56828586.01941c0a.aecc6.224c@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81487:9a96c6616c0e Date: 2015-12-28 16:54 +0100 http://bitbucket.org/pypy/pypy/changeset/9a96c6616c0e/ Log: Better comment for array filtering diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -685,7 +685,7 @@ arr_dtype = arr.get_dtype() index_dtype = index.get_dtype() # support the deprecated form where - # arr[[True, False, True, ...]] will return arr[0, 2, ...] + # array([True, False, True, ...]) will return array([0, 2, ...]) # by iterating over res_iter, not index_iter while not res_iter.done(res_state): getitem_filter_driver.jit_merge_point(shapelen=shapelen, From pypy.commits at gmail.com Tue Dec 29 08:07:12 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:12 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Add more test cases Message-ID: <56828580.87591c0a.d290e.fffffc8d@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81484:63607714b6bf Date: 2015-12-23 17:55 +0100 http://bitbucket.org/pypy/pypy/changeset/63607714b6bf/ Log: Add more test cases diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3263,11 +3263,13 @@ assert (data[m] == array([[ 0., 1., 2., 3., 4.], [ 10., 11., 12., 13., 14.]])).all() assert (data[m, 0] == array([ 0., 10.])).all() + # More of the same type of cases assert (data[m, 0:1] == array([[0.], [10.]])).all() - assert (data[m, 0:1] == array([[0.], [10.]])).all() + assert (data[m, 0:1, ...] 
== array([[0.], [10.]])).all() assert (data[..., m, 0:1] == array([[0.], [10.]])).all() # Assume False for missing elements of the bool index array assert (data[0, m] == array([ 0., 2.])).all() + assert (data[0:1, m] == array([[ 0., 2.]])).all() assert (data[0, m, None] == array([[0.], [2.]])).all() def test_dual_indexing_selecting(self): From pypy.commits at gmail.com Tue Dec 29 08:07:20 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:20 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Update what's new Message-ID: <56828588.11181c0a.14d96.ffffccd1@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81488:7f3474f2e21d Date: 2015-12-29 10:44 +0100 http://bitbucket.org/pypy/pypy/changeset/7f3474f2e21d/ Log: Update what's new diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -70,7 +70,7 @@ .. branch: fix-1674 -Fix for broken mask indexing / selection. Was issue #1674. +Fix for broken mask indexing / selection. Was: issues #1674 & #1717. .. branch: more-rposix From pypy.commits at gmail.com Tue Dec 29 08:07:23 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:23 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Add even more test cases Message-ID: <5682858b.89dec20a.7b97c.0987@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81490:cc2bb5dd979c Date: 2015-12-29 11:06 +0100 http://bitbucket.org/pypy/pypy/changeset/cc2bb5dd979c/ Log: Add even more test cases diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3263,14 +3263,23 @@ assert (data[m] == array([[ 0., 1., 2., 3., 4.], [ 10., 11., 12., 13., 14.]])).all() assert (data[m, 0] == array([ 0., 10.])).all() + assert (data[(m, 0, Ellipsis)] == array([ 0., 10.])).all() + assert (data[m, 0, ...] == array([ 0., 10.])).all() + assert (data[m, ..., 0] == array([ 0., 10.])).all() + assert (data[..., m, 0] == array([ 0., 10.])).all() + # More of the same type of cases assert (data[m, 0:1] == array([[0.], [10.]])).all() assert (data[m, 0:1, ...] 
== array([[0.], [10.]])).all() + assert (data[m, ..., 0:1] == array([[0.], [10.]])).all() assert (data[..., m, 0:1] == array([[0.], [10.]])).all() # Assume False for missing elements of the bool index array assert (data[0, m] == array([ 0., 2.])).all() assert (data[0:1, m] == array([[ 0., 2.]])).all() assert (data[0, m, None] == array([[0.], [2.]])).all() + assert (data[0:1, m, None] == array([[[0.], [2.]]])).all() + assert (data[None, m, 0:1] == array([[[0.], [10.]]])).all() + assert (data[m, m] == array([ 0., 12.])).all() def test_dual_indexing_selecting(self): from numpy import arange, array From pypy.commits at gmail.com Tue Dec 29 08:07:21 2015 From: pypy.commits at gmail.com (Vincent Legoll) Date: Tue, 29 Dec 2015 05:07:21 -0800 (PST) Subject: [pypy-commit] pypy fix-1674: Remove useless "pass" instruction Message-ID: <56828589.a658c20a.49ee6.043c@mx.google.com> Author: Vincent Legoll Branch: fix-1674 Changeset: r81489:777c771712dc Date: 2015-12-29 10:50 +0100 http://bitbucket.org/pypy/pypy/changeset/777c771712dc/ Log: Remove useless "pass" instruction diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -8,7 +8,6 @@ class BaseChunk(object): _attrs_ = ['step','out_dim'] - pass class Chunk(BaseChunk): From pypy.commits at gmail.com Tue Dec 29 10:37:02 2015 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 29 Dec 2015 07:37:02 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added guard_exception to regalloc+assembler Message-ID: <5682a89e.6650c20a.a11d6.3d2c@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81491:5670303748ca Date: 2015-12-29 15:07 +0100 http://bitbucket.org/pypy/pypy/changeset/5670303748ca/ Log: added guard_exception to regalloc+assembler diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -5049,6 +5049,7 @@ scalebox = ConstInt(arraydescr.itemsize) inputargs, oplist = self._get_operation_list(ops,'void') + # XXX print("input:", inputargs) for op in oplist: print(op) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -680,7 +680,7 @@ offset2 = self.cpu.subclassrange_min_offset if offset is not None: # read this field to get the vtable pointer - self.mc.load(r.SCRATCH2.value, loc_object.value, offset) + self.mc(r.SCRATCH2, l.addr(offset, loc_object)) # read the vtable's subclassrange_min field assert check_imm(offset2) self.mc.ld(r.SCRATCH2.value, r.SCRATCH2.value, offset2) @@ -729,6 +729,30 @@ self._store_force_index(op) self.store_info_on_descr(0, guard_token) + def emit_guard_exception(self, op, arglocs, regalloc): + loc, resloc = arglocs[:2] + failargs = arglocs[2:] + + mc = self.mc + mc.load_imm(r.SCRATCH, self.cpu.pos_exc_value()) + diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() + assert check_imm_value(diff) + + mc.LG(r.SCRATCH2, l.addr(diff, r.SCRATCH)) + if not loc.is_in_pool() and loc.is_imm(): + mc.cmp_op(r.SCRATCH2, loc, imm=True) + else: + mc.cmp_op(r.SCRATCH2, loc, pool=loc.is_in_pool()) + self.guard_success_cc = c.EQ + self._emit_guard(op, failargs) + + if resloc: + mc.load(resloc, r.SCRATCH, 0) + mc.LGHI(r.SCRATCH2, l.imm(0)) + mc.SG(r.SCRATCH2, l.addr(0, r.SCRATCH)) + mc.SG(r.SCRATCH2, l.addr(diff, r.SCRATCH)) + + class 
MemoryOpAssembler(object): _mixin_ = True diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1031,6 +1031,15 @@ locs = self._prepare_guard(op) return locs + def prepare_guard_exception(self, op): + loc = self.ensure_reg(op.getarg(0)) + if op in self.longevity: + resloc = self.force_allocate_reg(op) + else: + resloc = None + arglocs = self._prepare_guard(op, [loc, resloc]) + return arglocs + def prepare_copystrcontent(self, op): src_ptr_loc = self.ensure_reg(op.getarg(0), force_in_reg=True) dst_ptr_loc = self.ensure_reg(op.getarg(1), force_in_reg=True) From pypy.commits at gmail.com Tue Dec 29 10:37:04 2015 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 29 Dec 2015 07:37:04 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added save/restore exception impl Message-ID: <5682a8a0.aa5dc20a.74c0.fffff32a@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81492:4fbd38846596 Date: 2015-12-29 16:36 +0100 http://bitbucket.org/pypy/pypy/changeset/4fbd38846596/ Log: added save/restore exception impl diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -423,7 +423,42 @@ return mc.materialize(self.cpu, []) def _build_stack_check_slowpath(self): - pass # TODO + _, _, slowpathaddr = self.cpu.insert_stack_check() + if slowpathaddr == 0 or not self.cpu.propagate_exception_descr: + return # no stack check (for tests, or non-translated) + # + # make a regular function that is called from a point near the start + # of an assembler function (after it adjusts the stack and saves + # registers). + mc = InstrBuilder() + # + mc.STG(r.r14, l.addr(14*WORD, r.SP)) + # Do the call + # use SP as single parameter for the call + mc.STG(r.SP, l.addr(0, r.SP)) # store the backchain + mc.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) + mc.LGR(r.r2, r.SP) + mc.load_imm(mc.RAW_CALL_REG, slowpathaddr) + mc.raw_call() + mc.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) + # + # Check if it raised StackOverflow + mc.load_imm(r.SCRATCH, self.cpu.pos_exception()) + mc.LG(r.SCRATCH, l.addr(0, r.SCRATCH)) + # if this comparison is true, then everything is ok, + # else we have an exception + mc.cmp_op(r.SCRATCH, 0, imm=True) + # + # So we return to our caller, conditionally if "EQ" + mc.LG(r.r14, l.addr(14*WORD, r.SP)) + mc.BCR(c.EQ, r.r14) + # + # Else, jump to propagate_exception_path + assert self.propagate_exception_path + mc.b_abs(self.propagate_exception_path) + # + rawstart = mc.materialize(self.cpu, []) + self.stack_check_slowpath = rawstart def new_stack_loc(self, i, tp): base_ofs = self.cpu.get_baseofs_of_frame_field() @@ -573,7 +608,7 @@ self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() # - # TODO self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) + self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) rawstart = self.materialize_loop(original_loop_token) debug_bridge(descr_number, rawstart, codeendpos) self.patch_pending_failure_recoveries(rawstart) @@ -628,7 +663,7 @@ def propagate_memoryerror_if_r2_is_null(self): # if self.propagate_exception_path == 0 (tests), this may jump to 0 # and segfaults. too bad. the alternative is to continue anyway - # with r3==0, but that will segfault too. + # with r2==0, but that will segfault too. 
self.mc.cmp_op(r.r2, l.imm(0), imm=True) self.mc.load_imm(r.RETURN, self.propagate_exception_path) self.mc.BCR(c.EQ, r.RETURN) @@ -1048,6 +1083,29 @@ # exit function self._call_footer() + def _store_and_reset_exception(self, mc, excvalloc, exctploc=None): + """Reset the exception, after fetching it inside the two regs. + """ + mc.load_imm(r.r2, self.cpu.pos_exc_value()) + diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() + assert check_imm_value(diff) + # Load the exception fields into the two registers + mc.load(excvalloc, r.r2, 0) + if exctploc is not None: + mc.load(exctploc, r.r2, diff) + # Zero out the exception fields + mc.LGHI(r.r0, l.imm(0)) + mc.STG(r.r0, l.addr(0, r.r2)) + mc.STG(r.r0, l.addr(diff, r.r2)) + + def _restore_exception(self, mc, excvalloc, exctploc): + mc.load_imm(r.r2, self.cpu.pos_exc_value()) + diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() + assert check_imm_value(diff) + # Store the exception fields from the two registers + mc.STG(excvalloc, l.addr(0, r.r2)) + mc.STG(exctploc, l.addr(diff, r.r2)) + def load_gcmap(self, mc, reg, gcmap): # load the current gcmap into register 'reg' ptr = rffi.cast(lltype.Signed, gcmap) diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -126,6 +126,7 @@ if gcrootmap.is_shadow_stack and self.is_call_release_gil: # in this mode, RSHADOWOLD happens to contain the shadowstack # top at this point, so reuse it instead of loading it again + xxx ssreg = self.RSHADOWOLD self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -175,6 +175,16 @@ self.LGFI(dest_reg, l.imm(word & 0xFFFFffff)) self.IIHF(dest_reg, l.imm((word >> 32) & 0xFFFFffff)) + def load_imm_plus(self, dest_reg, word): + """Like load_imm(), but with one instruction less, and + leaves the loaded value off by some signed 16-bit difference. + Returns that difference.""" + diff = rffi.cast(lltype.Signed, rffi.cast(rffi.SHORT, word)) + word -= diff + assert word & 0xFFFF == 0 + self.load_imm(dest_reg, word) + return diff + def sync(self): # see sync. section of the zarch manual! 
self.BCR_rr(0xf,0) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -739,10 +739,7 @@ assert check_imm_value(diff) mc.LG(r.SCRATCH2, l.addr(diff, r.SCRATCH)) - if not loc.is_in_pool() and loc.is_imm(): - mc.cmp_op(r.SCRATCH2, loc, imm=True) - else: - mc.cmp_op(r.SCRATCH2, loc, pool=loc.is_in_pool()) + mc.cmp_op(r.SCRATCH2, loc) self.guard_success_cc = c.EQ self._emit_guard(op, failargs) @@ -752,6 +749,17 @@ mc.SG(r.SCRATCH2, l.addr(0, r.SCRATCH)) mc.SG(r.SCRATCH2, l.addr(diff, r.SCRATCH)) + def emit_save_exc_class(self, op, arglocs, regalloc): + [resloc] = arglocs + diff = self.mc.load_imm_plus(r.r2, self.cpu.pos_exception()) + self.mc.load(resloc, r.r2, diff) + + def emit_save_exception(self, op, arglocs, regalloc): + [resloc] = arglocs + self._store_and_reset_exception(self.mc, resloc) + + def emit_restore_exception(self, op, arglocs, regalloc): + self._restore_exception(self.mc, arglocs[1], arglocs[0]) class MemoryOpAssembler(object): _mixin_ = True diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -1032,7 +1032,7 @@ return locs def prepare_guard_exception(self, op): - loc = self.ensure_reg(op.getarg(0)) + loc = self.ensure_reg(op.getarg(0), force_in_reg=True) if op in self.longevity: resloc = self.force_allocate_reg(op) else: @@ -1040,6 +1040,16 @@ arglocs = self._prepare_guard(op, [loc, resloc]) return arglocs + def prepare_save_exception(self, op): + res = self.rm.force_allocate_reg(op) + return [res] + prepare_save_exc_class = prepare_save_exception + + def prepare_restore_exception(self, op): + loc0 = self.ensure_reg(op.getarg(0), force_in_reg=True) + loc1 = self.ensure_reg(op.getarg(1), force_in_reg=True) + return [loc0, loc1] + def prepare_copystrcontent(self, op): src_ptr_loc = self.ensure_reg(op.getarg(0), force_in_reg=True) dst_ptr_loc = self.ensure_reg(op.getarg(1), force_in_reg=True) From pypy.commits at gmail.com Tue Dec 29 20:30:30 2015 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 29 Dec 2015 17:30:30 -0800 (PST) Subject: [pypy-commit] pypy default: Remove error=CANNOT_FAIL: if self.__getattribute__() fails, the exception must be propagated. Message-ID: <568333b6.482e1c0a.ef337.ffffd3ac@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r81493:9fbb68a334bc Date: 2015-12-30 00:27 +0100 http://bitbucket.org/pypy/pypy/changeset/9fbb68a334bc/ Log: Remove error=CANNOT_FAIL: if self.__getattribute__() fails, the exception must be propagated. This is the default for functions returning a PyObject*. 
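	In other words, the slot wrapper now behaves like ordinary attribute
	lookup: when __getattribute__ fails, the AttributeError is left set for
	the C caller to inspect instead of being swallowed. A minimal
	Python-level sketch of the behaviour the updated test checks (the class
	and attribute names here are only illustrative):

	    class Obj(object):
	        def __init__(self):
	            self.attr1 = 123

	    obj = Obj()
	    assert getattr(obj, 'attr1') == 123
	    try:
	        getattr(obj, 'attr2')      # tp_getattro must let this propagate
	    except AttributeError:
	        pass                       # C side sees PyErr_ExceptionMatches(PyExc_AttributeError)
	    else:
	        assert False, "expected AttributeError"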
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,8 +4,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, - CANNOT_FAIL) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, @@ -387,7 +386,7 @@ return @cpython_api([PyObject, PyObject], PyObject, - error=CANNOT_FAIL, external=True) + external=True) @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -414,15 +414,26 @@ return NULL; } PyObject *name = PyString_FromString("attr1"); - PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); - if (attr1->ob_ival != value->ob_ival) + PyIntObject *attr = obj->ob_type->tp_getattro(obj, name); + if (attr->ob_ival != value->ob_ival) { PyErr_SetString(PyExc_ValueError, "tp_getattro returned wrong value"); return NULL; } Py_DECREF(name); - Py_DECREF(attr1); + Py_DECREF(attr); + name = PyString_FromString("attr2"); + attr = obj->ob_type->tp_getattro(obj, name); + if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) + { + PyErr_Clear(); + } else { + PyErr_SetString(PyExc_ValueError, + "tp_getattro should have raised"); + return NULL; + } + Py_DECREF(name); Py_RETURN_TRUE; ''' ) From pypy.commits at gmail.com Tue Dec 29 20:30:32 2015 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 29 Dec 2015 17:30:32 -0800 (PST) Subject: [pypy-commit] pypy default: Fix test. Message-ID: <568333b8.022f1c0a.fa884.ffffc0f8@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r81494:d3465d7f76a2 Date: 2015-12-30 00:38 +0100 http://bitbucket.org/pypy/pypy/changeset/d3465d7f76a2/ Log: Fix test. 
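The one-line log above refers to the PyArg_ParseTuple format units changed in the diff below: the test declares the target as `long intval;`, and the format unit has to match the C type — "i" fills an int, "l" fills a long, so parsing into a long with "i" writes only part of the variable on LP64 platforms. A standalone sketch of the correct pairing (function name invented for illustration):

    #include <Python.h>

    /* Matching PyArg_ParseTuple format units to C types: "i" expects an
     * int*, "l" expects a long*.  Mixing them up still compiles (the call
     * is varargs) but leaves part of the wider variable uninitialized. */
    static PyObject *
    parse_long_demo(PyObject *self, PyObject *args)
    {
        long intval;
        if (!PyArg_ParseTuple(args, "l", &intval))  /* "l" matches long */
            return NULL;
        return PyLong_FromLong(intval);
    }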
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -648,7 +648,7 @@ IntLikeObject *intObj; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; @@ -668,7 +668,7 @@ IntLikeObjectNoOp *intObjNoOp; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; From pypy.commits at gmail.com Tue Dec 29 20:30:34 2015 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 29 Dec 2015 17:30:34 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: hg merge default Message-ID: <568333ba.cf821c0a.e2c36.3f52@mx.google.com> Author: Amaury Forgeot d'Arc Branch: cpyext-ext Changeset: r81495:270c041cd532 Date: 2015-12-30 00:38 +0100 http://bitbucket.org/pypy/pypy/changeset/270c041cd532/ Log: hg merge default diff --git a/pypy/module/_cffi_backend/cglob.py b/pypy/module/_cffi_backend/cglob.py --- a/pypy/module/_cffi_backend/cglob.py +++ b/pypy/module/_cffi_backend/cglob.py @@ -3,6 +3,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend import newtype +from rpython.rlib import rgil from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -26,7 +27,9 @@ if not we_are_translated(): FNPTR = rffi.CCallback([], rffi.VOIDP) fetch_addr = rffi.cast(FNPTR, self.fetch_addr) + rgil.release() result = fetch_addr() + rgil.acquire() else: # careful in translated versions: we need to call fetch_addr, # but in a GIL-releasing way. The easiest is to invoke a diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -423,7 +423,9 @@ exchange_offset += rffi.getintfield(self.atypes[i], 'c_size') # store the exchange data size - cif_descr.exchange_size = exchange_offset + # we also align it to the next multiple of 8, in an attempt to + # work around bugs(?) 
of libffi (see cffi issue #241) + cif_descr.exchange_size = self.align_arg(exchange_offset) def fb_extra_fields(self, cif_descr): cif_descr.abi = self.fabi diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -4,8 +4,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, - CANNOT_FAIL) + cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES) from pypy.module.cpyext.typeobjectdefs import ( unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, @@ -387,7 +386,7 @@ return @cpython_api([PyObject, PyObject], PyObject, - error=CANNOT_FAIL, external=True) + external=True) @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -416,15 +416,26 @@ return NULL; } PyObject *name = PyString_FromString("attr1"); - PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); - if (attr1->ob_ival != value->ob_ival) + PyIntObject *attr = obj->ob_type->tp_getattro(obj, name); + if (attr->ob_ival != value->ob_ival) { PyErr_SetString(PyExc_ValueError, "tp_getattro returned wrong value"); return NULL; } Py_DECREF(name); - Py_DECREF(attr1); + Py_DECREF(attr); + name = PyString_FromString("attr2"); + attr = obj->ob_type->tp_getattro(obj, name); + if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) + { + PyErr_Clear(); + } else { + PyErr_SetString(PyExc_ValueError, + "tp_getattro should have raised"); + return NULL; + } + Py_DECREF(name); Py_RETURN_TRUE; ''' ) @@ -647,7 +658,7 @@ IntLikeObject *intObj; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; @@ -667,7 +678,7 @@ IntLikeObjectNoOp *intObjNoOp; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -100,6 +100,7 @@ self.argtypes = argtypes def __call__(self, funcdesc, inputcells): + from rpython.rlib.objectmodel import NOT_CONSTANT from rpython.rtyper.lltypesystem import lltype args_s = [] from rpython.annotator import model as annmodel @@ -115,6 +116,9 @@ args_s.append(s_input) elif argtype is None: args_s.append(inputcells[i]) # no change + elif argtype is NOT_CONSTANT: + from rpython.annotator.model import not_const + args_s.append(not_const(inputcells[i])) else: args_s.append(annotation(argtype, bookkeeper=funcdesc.bookkeeper)) if len(inputcells) != len(args_s): diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -804,7 +804,7 @@ base_loc = self.make_sure_var_in_reg(boxes[0], boxes) ofs = boxes[1].getint() value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - size = abs(boxes[3].getint()) + size = boxes[3].getint() ofs_size = 
default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -849,7 +849,7 @@ index_loc = self.make_sure_var_in_reg(boxes[1], boxes) assert boxes[3].getint() == 1 # scale ofs = boxes[4].getint() - size = abs(boxes[5].getint()) + size = boxes[5].getint() assert check_imm_arg(ofs) return [value_loc, base_loc, index_loc, imm(size), imm(ofs)] diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -126,11 +126,11 @@ def emit_gc_store_or_indexed(self, op, ptr_box, index_box, value_box, itemsize, factor, offset): factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(index_box, + self._emit_mul_if_factor_offset_not_supported(index_box, factor, offset) # - if factor == 1 and offset == 0: - args = [ptr_box, index_box, value_box, ConstInt(itemsize)] + if index_box is None: + args = [ptr_box, ConstInt(offset), value_box, ConstInt(itemsize)] newload = ResOperation(rop.GC_STORE, args) else: args = [ptr_box, index_box, value_box, ConstInt(factor), @@ -153,18 +153,15 @@ index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) - def _emit_mul_add_if_factor_offset_not_supported(self, index_box, factor, offset): - orig_factor = factor - # factor - must_manually_load_const = False # offset != 0 and not self.cpu.load_constant_offset - if factor != 1 and (factor not in self.cpu.load_supported_factors or \ - (not index_box.is_constant() and must_manually_load_const)): - # enter here if the factor is supported by the cpu - # OR the index is not constant and a new resop must be emitted - # to add the offset - if isinstance(index_box, ConstInt): - index_box = ConstInt(index_box.value * factor) - else: + def _emit_mul_if_factor_offset_not_supported(self, index_box, + factor, offset): + # Returns (factor, offset, index_box) where index_box is either + # a non-constant BoxInt or None. 
+ if isinstance(index_box, ConstInt): + return 1, index_box.value * factor + offset, None + else: + if factor != 1 and factor not in self.cpu.load_supported_factors: + # the factor is supported by the cpu # x & (x - 1) == 0 is a quick test for power of 2 assert factor > 0 if (factor & (factor - 1)) == 0: @@ -174,20 +171,13 @@ index_box = ResOperation(rop.INT_MUL, [index_box, ConstInt(factor)]) self.emit_op(index_box) - factor = 1 - # adjust the constant offset - #if must_manually_load_const: - # if isinstance(index_box, ConstInt): - # index_box = ConstInt(index_box.value + offset) - # else: - # index_box = ResOperation(rop.INT_ADD, [index_box, ConstInt(offset)]) - # self.emit_op(index_box) - # offset = 0 - return factor, offset, index_box + factor = 1 + return factor, offset, index_box - def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, factor, offset, sign, type='i'): + def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, + factor, offset, sign, type='i'): factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(index_box, + self._emit_mul_if_factor_offset_not_supported(index_box, factor, offset) # if sign: @@ -197,8 +187,8 @@ optype = type if op is not None: optype = op.type - if factor == 1 and offset == 0: - args = [ptr_box, index_box, ConstInt(itemsize)] + if index_box is None: + args = [ptr_box, ConstInt(offset), ConstInt(itemsize)] newload = ResOperation(OpHelpers.get_gc_load(optype), args) else: args = [ptr_box, index_box, ConstInt(factor), @@ -547,9 +537,8 @@ ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_depth) if sign: size = -size - args = [ConstInt(frame_info), ConstInt(0), ConstInt(1), - ConstInt(ofs), ConstInt(size)] - size = ResOperation(rop.GC_LOAD_INDEXED_I, args) + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = ResOperation(rop.NEW_ARRAY, [size], descr=descrs.arraydescr) @@ -560,9 +549,8 @@ ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_size) if sign: size = -size - args = [ConstInt(frame_info), ConstInt(0), ConstInt(1), - ConstInt(ofs), ConstInt(size)] - size = ResOperation(rop.GC_LOAD_INDEXED_I, args) + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = self.gen_malloc_nursery_varsize_frame(size) self.gen_initialize_tid(frame, descrs.arraydescr.tid) @@ -612,15 +600,12 @@ descr = self.cpu.getarraydescr_for_frame(arg.type) assert self.cpu.JITFRAME_FIXED_SIZE & 1 == 0 _, itemsize, _ = self.cpu.unpack_arraydescr_size(descr) - index = index_list[i] // itemsize # index is in bytes - # emit GC_LOAD_INDEXED - itemsize, basesize, _ = unpack_arraydescr(descr) - factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(ConstInt(index), - itemsize, basesize) - args = [frame, index_box, arg, ConstInt(factor), - ConstInt(offset), ConstInt(itemsize)] - self.emit_op(ResOperation(rop.GC_STORE_INDEXED, args)) + array_offset = index_list[i] # index, already measured in bytes + # emit GC_STORE + _, basesize, _ = unpack_arraydescr(descr) + offset = basesize + array_offset + args = [frame, ConstInt(offset), arg, ConstInt(itemsize)] + self.emit_op(ResOperation(rop.GC_STORE, args)) descr = op.getdescr() assert isinstance(descr, JitCellToken) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ 
b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -30,13 +30,26 @@ class RewriteTests(object): def check_rewrite(self, frm_operations, to_operations, **namespace): - def trans_getarray_to_load(descr): - size = descr.basesize - if descr.is_item_signed(): - size = -size - return ','.join([str(n) for n in [descr.itemsize, - descr.basesize, - size]]) + def setfield(baseptr, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + return 'gc_store(%s, %d, %s, %d)' % (baseptr, descr.offset, + newvalue, descr.field_size) + def setarrayitem(baseptr, index, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(index, (str, int)) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + if isinstance(index, int): + offset = descr.basesize + index * descr.itemsize + return 'gc_store(%s, %d, %s, %d)' % (baseptr, offset, + newvalue, descr.itemsize) + else: + return 'gc_store_indexed(%s, %s, %s, %d, %d, %s)' % ( + baseptr, index, newvalue, + descr.itemsize, descr.basesize, descr.itemsize) + # WORD = globals()['WORD'] S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) @@ -376,7 +389,7 @@ gc_store(p1, 0, 5678, 8) p2 = nursery_ptr_increment(p1, %(tdescr.size)d) gc_store(p2, 0, 1234, 8) - gc_store(p1, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) + %(setfield('p1', 0, tdescr.gc_fielddescrs[0]))s jump() """) @@ -485,7 +498,7 @@ """, """ [i0] p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr) - gc_store_indexed(p0, 0, i0, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) gc_store(p0, 0, 0, %(strlendescr.field_size)s) jump(i0) """) @@ -611,19 +624,19 @@ %(strdescr.basesize + 16 * strdescr.itemsize + \ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s) - gc_store_indexed(p0, 0, 14, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s) gc_store(p0, 0, 0, %(strhashdescr.field_size)s) p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s) - gc_store_indexed(p1, 0, 10, 1, %(unicodelendescr.offset)s, %(unicodelendescr.field_size)s) + gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s) gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s) p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) - gc_store_indexed(p2, 0, i2, 1, %(unicodelendescr.offset)s, %(unicodelendescr.field_size)s) + gc_store(p2, %(unicodelendescr.offset)s, i2, %(unicodelendescr.field_size)s) gc_store(p2, 0, 0, %(unicodehashdescr.field_size)s) p3 = call_malloc_nursery_varsize(1, 1, i2, \ descr=strdescr) - gc_store_indexed(p3, 0, i2, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p3, %(strlendescr.offset)s, i2, %(strlendescr.field_size)s) gc_store(p3, 0, 0, %(strhashdescr.field_size)s) jump() """) @@ -636,7 +649,7 @@ """, """ [p1, p2] cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, 0, p2, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p1, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump() """) @@ -650,7 +663,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', 
cdescr))s jump() """) @@ -671,7 +684,7 @@ zero_array(p1, 0, 129, descr=cdescr) call_n(123456) cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -693,7 +706,7 @@ zero_array(p1, 0, 130, descr=cdescr) call_n(123456) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -705,7 +718,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -725,7 +738,7 @@ zero_array(p1, 0, 5, descr=cdescr) label(p1, i2, p3) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -743,12 +756,12 @@ size = interiorzdescr.arraydescr.itemsize self.check_rewrite(""" [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + setinteriorfield_gc(p1, 7, p2, descr=interiorzdescr) jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb_array(p1, 0, descr=wbdescr) - gc_store_indexed(p1, 0, p2, %(scale)s, %(offset)s, %(size)s) + cond_call_gc_wb_array(p1, 7, descr=wbdescr) + gc_store(p1, %(offset + 7 * scale)s, p2, %(size)s) jump(p1, p2) """, interiorzdescr=interiorzdescr, scale=scale, offset=offset, size=size) @@ -763,7 +776,7 @@ [p1] p0 = call_malloc_nursery(%(tdescr.size)d) gc_store(p0, 0, 5678, %(tiddescr.field_size)s) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -781,7 +794,7 @@ p1 = nursery_ptr_increment(p0, %(tdescr.size)d) gc_store(p1, 0, 1234, %(tiddescr.field_size)s) # <<>> - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -798,7 +811,7 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - gc_store_indexed(p0, i2, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 'i2', 'p1', cdescr))s jump() """) @@ -816,8 +829,8 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 2, 3, descr=cdescr) - gc_store_indexed(p0, 1, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 0, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p1', cdescr))s + %(setarrayitem('p0', 0, 'p2', cdescr))s jump() """) @@ -835,8 +848,8 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 3, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 4, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s jump() """) @@ -855,9 +868,9 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 2, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -878,11 +891,11 @@ 
gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 5, 0, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 4, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 2, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s + %(setarrayitem('p0', 0, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -901,10 +914,10 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 0, 'p1', cdescr))s call_n(321321) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -923,10 +936,10 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 0, 'p1', cdescr))s label(p0, p2) cond_call_gc_wb_array(p0, 1, descr=wbdescr) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -955,7 +968,7 @@ gc_store(p0, 0, i3, %(blendescr.field_size)s) zero_array(p0, 0, i3, descr=bdescr) cond_call_gc_wb_array(p0, 0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(bdescr.basesize)s, 1) + %(setarrayitem('p0', 0, 'p1', bdescr))s jump() """) @@ -991,10 +1004,10 @@ gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) - gc_store_indexed(p1, 0, i0, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p1, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) gc_store(p1, 0, 0, %(strhashdescr.field_size)s) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -1012,7 +1025,7 @@ gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) label(p0, p1) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -1025,8 +1038,8 @@ """, """ [p0, p1, p2] cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) - gc_store_indexed(p0, 0, p2, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump(p1, p2, p0) """) @@ -1036,20 +1049,20 @@ i2 = call_assembler_i(i0, f0, descr=casmdescr) """, """ [i0, f0] - i1 = gc_load_indexed_i(ConstClass(frame_info), 0, 1, 1, %(jfi_frame_size.field_size)s) + i1 = gc_load_i(ConstClass(frame_info), %(jfi_frame_size.offset)s, %(jfi_frame_size.field_size)s) p1 = call_malloc_nursery_varsize_frame(i1) gc_store(p1, 0, 0, %(tiddescr.field_size)s) - i2 = gc_load_indexed_i(ConstClass(frame_info), 0, 1, 1, %(jfi_frame_depth.field_size)s) - gc_store_indexed(p1, 0, 0, 1, 1, 
%(jf_extra_stack_depth.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_savedata.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_force_descr.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_descr.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_guard_exc.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_forward.field_size)s) + i2 = gc_load_i(ConstClass(frame_info), %(jfi_frame_depth.offset)s, %(jfi_frame_depth.field_size)s) + %(setfield('p1', 0, jf_extra_stack_depth))s + %(setfield('p1', 'NULL', jf_savedata))s + %(setfield('p1', 'NULL', jf_force_descr))s + %(setfield('p1', 'NULL', jf_descr))s + %(setfield('p1', 'NULL', jf_guard_exc))s + %(setfield('p1', 'NULL', jf_forward))s gc_store(p1, 0, i2, %(framelendescr.field_size)s) - gc_store_indexed(p1, 0, ConstClass(frame_info), 1, 1, %(jf_frame_info.field_size)s) - gc_store_indexed(p1, 0, i0, 8, 3, 8) - gc_store_indexed(p1, 1, f0, 8, 5, 8) + %(setfield('p1', 'ConstClass(frame_info)', jf_frame_info))s + gc_store(p1, 3, i0, 8) + gc_store(p1, 13, f0, 8) i3 = call_assembler_i(p1, descr=casmdescr) """) @@ -1101,7 +1114,7 @@ p0 = call_malloc_nursery(%(tdescr.size)d) gc_store(p0, 0, 5678, %(tiddescr.field_size)s) gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) - p1 = gc_load_indexed_r(p0, 0, 1, %(tzdescr.field_size)s, %(tzdescr.field_size)s) + p1 = gc_load_r(p0, %(tzdescr.offset)s, %(tzdescr.field_size)s) jump(p1) """) @@ -1155,23 +1168,19 @@ # 'i5 = int_add(i1,%(raw_sfdescr.basesize)s);' # 'gc_store(p0,i5,i2,%(raw_sfdescr.itemsize)s)'], [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' - 'i3 = gc_load_indexed_f(p0,0,1,%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' - 'i3 = gc_load_indexed_f(p0,0,1,%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = setfield_raw(p0,i1,descr=ydescr)' '->' - 'i3 = gc_store_indexed(p0,0,i1,1,' - '%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = setfield_gc(p0,p0,descr=zdescr)' '->' + 'i3 = gc_load_f(p0,%(ydescr.offset)s,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_raw(p0,i1,descr=ydescr)' '->' + 'gc_store(p0,%(ydescr.offset)s,i1,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_gc(p0,p0,descr=zdescr)' '->' 'cond_call_gc_wb(p0, descr=wbdescr);' - 'i3 = gc_store_indexed(p0,0,p0,1,' - '%(zdescr.offset)s,%(zdescr.field_size)s)'], + 'gc_store(p0,%(zdescr.offset)s,p0,%(zdescr.field_size)s)'], [False, (1,), 'i3 = arraylen_gc(p0, descr=adescr)' '->' 'i3 = gc_load_i(p0,0,%(adescr.itemsize)s)'], #[False, (1,), 'i3 = strlen(p0)' '->' # 'i3 = gc_load_i(p0,' # '%(strlendescr.offset)s,%(strlendescr.field_size)s)'], [True, (1,), 'i3 = strlen(p0)' '->' - 'i3 = gc_load_indexed_i(p0,0,1,' + 'i3 = gc_load_i(p0,' '%(strlendescr.offset)s,' '%(strlendescr.field_size)s)'], #[False, (1,), 'i3 = unicodelen(p0)' '->' @@ -1179,7 +1188,7 @@ # '%(unicodelendescr.offset)s,' # '%(unicodelendescr.field_size)s)'], [True, (1,), 'i3 = unicodelen(p0)' '->' - 'i3 = gc_load_indexed_i(p0,0,1,' + 'i3 = gc_load_i(p0,' '%(unicodelendescr.offset)s,' '%(unicodelendescr.field_size)s)'], diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -20,7 +20,7 @@ PPCBuilder, PPCGuardToken) from rpython.jit.backend.ppc.regalloc import TempPtr, TempInt from rpython.jit.backend.llsupport import symbolic, jitframe -from 
rpython.jit.backend.llsupport.descr import InteriorFieldDescr, CallDescr +from rpython.jit.backend.llsupport.descr import CallDescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref @@ -706,8 +706,10 @@ _mixin_ = True - def _write_to_mem(self, value_loc, base_loc, ofs, size): - if size.value == 8: + def _write_to_mem(self, value_loc, base_loc, ofs, size_loc): + assert size_loc.is_imm() + size = size_loc.value + if size == 8: if value_loc.is_fp_reg(): if ofs.is_imm(): self.mc.stfd(value_loc.value, base_loc.value, ofs.value) @@ -718,17 +720,17 @@ self.mc.std(value_loc.value, base_loc.value, ofs.value) else: self.mc.stdx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if ofs.is_imm(): self.mc.stw(value_loc.value, base_loc.value, ofs.value) else: self.mc.stwx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if ofs.is_imm(): self.mc.sth(value_loc.value, base_loc.value, ofs.value) else: self.mc.sthx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.stb(value_loc.value, base_loc.value, ofs.value) else: @@ -736,18 +738,35 @@ else: assert 0, "size not supported" - def emit_setfield_gc(self, op, arglocs, regalloc): - value_loc, base_loc, ofs, size = arglocs - self._write_to_mem(value_loc, base_loc, ofs, size) + def emit_gc_store(self, op, arglocs, regalloc): + value_loc, base_loc, ofs_loc, size_loc = arglocs + self._write_to_mem(value_loc, base_loc, ofs_loc, size_loc) - emit_setfield_raw = emit_setfield_gc - emit_zero_ptr_field = emit_setfield_gc + def _apply_offset(self, index_loc, ofs_loc): + # If offset != 0 then we have to add it here. Note that + # mc.addi() would not be valid with operand r0. + assert ofs_loc.is_imm() # must be an immediate... 
+ assert _check_imm_arg(ofs_loc.getint()) # ...that fits 16 bits + assert index_loc is not r.SCRATCH2 + # (simplified version of _apply_scale()) + if ofs_loc.value > 0: + self.mc.addi(r.SCRATCH2.value, index_loc.value, ofs_loc.value) + index_loc = r.SCRATCH2 + return index_loc - def _load_from_mem(self, res, base_loc, ofs, size, signed): + def emit_gc_store_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, value_loc, ofs_loc, size_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._write_to_mem(value_loc, base_loc, index_loc, size_loc) + + def _load_from_mem(self, res, base_loc, ofs, size_loc, sign_loc): # res, base_loc, ofs, size and signed are all locations assert base_loc is not r.SCRATCH - sign = signed.value - if size.value == 8: + assert size_loc.is_imm() + size = size_loc.value + assert sign_loc.is_imm() + sign = sign_loc.value + if size == 8: if res.is_fp_reg(): if ofs.is_imm(): self.mc.lfd(res.value, base_loc.value, ofs.value) @@ -758,7 +777,7 @@ self.mc.ld(res.value, base_loc.value, ofs.value) else: self.mc.ldx(res.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if IS_PPC_64 and sign: if ofs.is_imm(): self.mc.lwa(res.value, base_loc.value, ofs.value) @@ -769,7 +788,7 @@ self.mc.lwz(res.value, base_loc.value, ofs.value) else: self.mc.lwzx(res.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if sign: if ofs.is_imm(): self.mc.lha(res.value, base_loc.value, ofs.value) @@ -780,7 +799,7 @@ self.mc.lhz(res.value, base_loc.value, ofs.value) else: self.mc.lhzx(res.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.lbz(res.value, base_loc.value, ofs.value) else: @@ -790,22 +809,28 @@ else: assert 0, "size not supported" - def _genop_getfield(self, op, arglocs, regalloc): - base_loc, ofs, res, size, sign = arglocs - self._load_from_mem(res, base_loc, ofs, size, sign) + def _genop_gc_load(self, op, arglocs, regalloc): + base_loc, ofs_loc, res_loc, size_loc, sign_loc = arglocs + self._load_from_mem(res_loc, base_loc, ofs_loc, size_loc, sign_loc) - emit_getfield_gc_i = _genop_getfield - emit_getfield_gc_r = _genop_getfield - emit_getfield_gc_f = _genop_getfield - emit_getfield_gc_pure_i = _genop_getfield - emit_getfield_gc_pure_r = _genop_getfield - emit_getfield_gc_pure_f = _genop_getfield - emit_getfield_raw_i = _genop_getfield - emit_getfield_raw_f = _genop_getfield + emit_gc_load_i = _genop_gc_load + emit_gc_load_r = _genop_gc_load + emit_gc_load_f = _genop_gc_load + + def _genop_gc_load_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, res_loc, ofs_loc, size_loc, sign_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._load_from_mem(res_loc, base_loc, index_loc, size_loc, sign_loc) + + emit_gc_load_indexed_i = _genop_gc_load_indexed + emit_gc_load_indexed_r = _genop_gc_load_indexed + emit_gc_load_indexed_f = _genop_gc_load_indexed SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) def _multiply_by_constant(self, loc, multiply_by, scratch_loc): + # XXX should die together with _apply_scale() but can't because + # of emit_zero_array() and malloc_cond_varsize() at the moment assert loc.is_reg() if multiply_by == 1: return loc @@ -827,6 +852,9 @@ return scratch_loc def _apply_scale(self, ofs, index_loc, itemsize): + # XXX should die now that getarrayitem and getinteriorfield are gone + # but can't because of emit_zero_array() at the moment + # For arrayitem and interiorfield reads and writes: this returns an # offset 
suitable for use in ld/ldx or similar instructions. # The result will be either the register r2 or a 16-bit immediate. @@ -857,44 +885,6 @@ index_loc = r.SCRATCH2 return index_loc - def _genop_getarray_or_interiorfield(self, op, arglocs, regalloc): - (base_loc, index_loc, res_loc, ofs_loc, - itemsize, fieldsize, fieldsign) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._load_from_mem(res_loc, base_loc, ofs_loc, fieldsize, fieldsign) - - emit_getinteriorfield_gc_i = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_r = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_f = _genop_getarray_or_interiorfield - - def emit_setinteriorfield_gc(self, op, arglocs, regalloc): - (base_loc, index_loc, value_loc, ofs_loc, - itemsize, fieldsize) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._write_to_mem(value_loc, base_loc, ofs_loc, fieldsize) - - emit_setinteriorfield_raw = emit_setinteriorfield_gc - - def emit_arraylen_gc(self, op, arglocs, regalloc): - res, base_loc, ofs = arglocs - self.mc.load(res.value, base_loc.value, ofs.value) - - emit_setarrayitem_gc = emit_setinteriorfield_gc - emit_setarrayitem_raw = emit_setarrayitem_gc - - emit_getarrayitem_gc_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_f = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_f = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_i = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_f = _genop_getarray_or_interiorfield - - emit_raw_store = emit_setarrayitem_gc - emit_raw_load_i = _genop_getarray_or_interiorfield - emit_raw_load_f = _genop_getarray_or_interiorfield - def _copy_in_scratch2(self, loc): if loc.is_imm(): self.mc.li(r.SCRATCH2.value, loc.value) @@ -998,10 +988,6 @@ _mixin_ = True - emit_strlen = FieldOpAssembler._genop_getfield - emit_strgetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_strsetitem = FieldOpAssembler.emit_setarrayitem_gc - def emit_copystrcontent(self, op, arglocs, regalloc): self._emit_copycontent(arglocs, is_unicode=False) @@ -1059,12 +1045,8 @@ class UnicodeOpAssembler(object): - _mixin_ = True - - emit_unicodelen = FieldOpAssembler._genop_getfield - emit_unicodegetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_unicodesetitem = FieldOpAssembler.emit_setarrayitem_gc + # empty! 
class AllocOpAssembler(object): diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -17,12 +17,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import ArrayDescr +from rpython.jit.backend.llsupport.descr import unpack_arraydescr import rpython.jit.backend.ppc.register as r import rpython.jit.backend.ppc.condition as c -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_print @@ -691,159 +688,69 @@ src_locations2, dst_locations2, fptmploc) return [] - def prepare_setfield_gc(self, op): - ofs, size, _ = unpack_fielddescr(op.getdescr()) + def prepare_gc_store(self, op): base_loc = self.ensure_reg(op.getarg(0)) - value_loc = self.ensure_reg(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [value_loc, base_loc, ofs_loc, imm(size)] + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + size_loc = self.ensure_reg_or_any_imm(op.getarg(3)) + return [value_loc, base_loc, ofs_loc, size_loc] - prepare_setfield_raw = prepare_setfield_gc + def _prepare_gc_load(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + self.free_op_vars() + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(2) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, ofs_loc, res_loc, size_loc, imm(sign)] - def _prepare_getfield(self, op): - ofs, size, sign = unpack_fielddescr(op.getdescr()) + prepare_gc_load_i = _prepare_gc_load + prepare_gc_load_r = _prepare_gc_load + prepare_gc_load_f = _prepare_gc_load + + def prepare_gc_store_indexed(self, op): base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + assert op.getarg(3).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(4)) + assert ofs_loc.is_imm() # the arg(4) should always be a small constant + size_loc = self.ensure_reg_or_any_imm(op.getarg(5)) + return [base_loc, index_loc, value_loc, ofs_loc, size_loc] + + def _prepare_gc_load_indexed(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + assert op.getarg(2).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(3)) + assert ofs_loc.is_imm() # the arg(3) should always be a small constant self.free_op_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size), imm(sign)] + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(4) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, index_loc, res_loc, ofs_loc, size_loc, imm(sign)] - prepare_getfield_gc_i = 
_prepare_getfield - prepare_getfield_gc_r = _prepare_getfield - prepare_getfield_gc_f = _prepare_getfield - prepare_getfield_raw_i = _prepare_getfield - prepare_getfield_raw_f = _prepare_getfield - prepare_getfield_gc_pure_i = _prepare_getfield - prepare_getfield_gc_pure_r = _prepare_getfield - prepare_getfield_gc_pure_f = _prepare_getfield + prepare_gc_load_indexed_i = _prepare_gc_load_indexed + prepare_gc_load_indexed_r = _prepare_gc_load_indexed + prepare_gc_load_indexed_f = _prepare_gc_load_indexed def prepare_increment_debug_counter(self, op): base_loc = self.ensure_reg(op.getarg(0)) temp_loc = r.SCRATCH2 return [base_loc, temp_loc] - def _prepare_getinteriorfield(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(itemsize), imm(fieldsize), imm(sign)] - - prepare_getinteriorfield_gc_i = _prepare_getinteriorfield - prepare_getinteriorfield_gc_r = _prepare_getinteriorfield - prepare_getinteriorfield_gc_f = _prepare_getinteriorfield - - def prepare_setinteriorfield_gc(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, _ = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(itemsize), imm(fieldsize)] - - prepare_setinteriorfield_raw = prepare_setinteriorfield_gc - - def prepare_arraylen_gc(self, op): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - assert _check_imm_arg(ofs) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_setarrayitem_gc(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - imm_size = imm(size) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - - prepare_setarrayitem_raw = prepare_setarrayitem_gc - - def prepare_raw_store(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(1), imm(size)] - - def _prepare_getarrayitem(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(size) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(sign)] - - prepare_getarrayitem_gc_i = _prepare_getarrayitem - prepare_getarrayitem_gc_r = _prepare_getarrayitem - prepare_getarrayitem_gc_f = _prepare_getarrayitem - prepare_getarrayitem_raw_i = _prepare_getarrayitem - prepare_getarrayitem_raw_f = _prepare_getarrayitem - 
prepare_getarrayitem_gc_pure_i = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_r = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_f = _prepare_getarrayitem - - def _prepare_raw_load(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(1), imm(size), imm(sign)] - - prepare_raw_load_i = _prepare_raw_load - prepare_raw_load_f = _prepare_raw_load - - def prepare_strlen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_strgetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_strsetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - def prepare_copystrcontent(self, op): src_ptr_loc = self.ensure_reg(op.getarg(0)) dst_ptr_loc = self.ensure_reg(op.getarg(1)) @@ -856,37 +763,6 @@ prepare_copyunicodecontent = prepare_copystrcontent - def prepare_unicodelen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_unicodegetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_unicodesetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - prepare_same_as_i = helper.prepare_unary_op prepare_same_as_r = helper.prepare_unary_op prepare_same_as_f = helper.prepare_unary_op @@ -1078,12 +954,6 @@ arglocs = self._prepare_guard(op) return arglocs - def prepare_zero_ptr_field(self, op): - 
base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) - value_loc = self.ensure_reg(ConstInt(0)) - return [value_loc, base_loc, ofs_loc, imm(WORD)] - def prepare_zero_array(self, op): itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -21,6 +21,9 @@ IS_64_BIT = True backend_name = 'ppc64' + # can an ISA instruction handle a factor to the offset? + load_supported_factors = (1,) + from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE frame_reg = r.SP all_reg_indexes = [-1] * 32 diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -4,8 +4,7 @@ import os, sys from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, - unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) +from rpython.jit.backend.llsupport.descr import CallDescr, unpack_arraydescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op, @@ -1039,7 +1038,8 @@ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) size_box = op.getarg(3) assert isinstance(size_box, ConstInt) - size = abs(size_box.value) + size = size_box.value + assert size >= 1 if size == 1: need_lower_byte = True else: @@ -1061,7 +1061,8 @@ assert isinstance(size_box, ConstInt) factor = scale_box.value offset = offset_box.value - size = abs(size_box.value) + size = size_box.value + assert size >= 1 if size == 1: need_lower_byte = True else: @@ -1083,9 +1084,9 @@ result_loc = self.force_allocate_reg(op) size_box = op.getarg(2) assert isinstance(size_box, ConstInt) - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 @@ -1108,9 +1109,9 @@ assert isinstance(size_box, ConstInt) scale = scale_box.value offset = offset_box.value - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -68,8 +68,8 @@ return box.value def repr_rpython(box, typechars): - return '%s/%s%d' % (box._get_hash_(), typechars, - compute_unique_id(box)) + return '%s/%s' % (box._get_hash_(), typechars, + ) #compute_unique_id(box)) class XxxAbstractValue(object): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1204,8 +1204,12 @@ '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- # same paramters as GC_LOAD, but one additional for the value to store - # note that the itemsize is not signed! 
+ # note that the itemsize is not signed (always > 0) # (gcptr, index, value, [scale, base_offset,] itemsize) + # invariants for GC_STORE: index is constant, but can be large + # invariants for GC_STORE_INDEXED: index is a non-constant box; + # scale is a constant; + # base_offset is a small constant 'GC_STORE/4d/n', 'GC_STORE_INDEXED/6d/n', diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -114,6 +114,8 @@ specialize = _Specialize() +NOT_CONSTANT = object() # to use in enforceargs() + def enforceargs(*types_, **kwds): """ Decorate a function with forcing of RPython-level types on arguments. None means no enforcing. diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -9,7 +9,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.rarithmetic import intmask, widen from rpython.rlib.objectmodel import ( - specialize, enforceargs, register_replacement_for) + specialize, enforceargs, register_replacement_for, NOT_CONSTANT) from rpython.rlib.signature import signature from rpython.rlib import types from rpython.annotator.model import s_Str0 @@ -415,7 +415,7 @@ @replace_os_function('open') @specialize.argtype(0) - at enforceargs(None, int, int, typecheck=False) + at enforceargs(NOT_CONSTANT, int, int, typecheck=False) def open(path, flags, mode): if _prefer_unicode(path): fd = c_wopen(_as_unicode0(path), flags, mode) diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -4,7 +4,7 @@ r_dict, UnboxedValue, Symbolic, compute_hash, compute_identity_hash, compute_unique_id, current_object_addr_as_int, we_are_translated, prepare_dict_update, reversed_dict, specialize, enforceargs, newlist_hint, - resizelist_hint, is_annotation_constant, always_inline, + resizelist_hint, is_annotation_constant, always_inline, NOT_CONSTANT, iterkeys_with_hash, iteritems_with_hash, contains_with_hash, setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin) from rpython.translator.translator import TranslationContext, graphof @@ -529,6 +529,18 @@ TYPES = [v.concretetype for v in graph.getargs()] assert TYPES == [lltype.Signed, lltype.Float] +def test_enforceargs_not_constant(): + from rpython.translator.translator import TranslationContext, graphof + @enforceargs(NOT_CONSTANT) + def f(a): + return a + def f42(): + return f(42) + t = TranslationContext() + a = t.buildannotator() + s = a.build_types(f42, []) + assert not hasattr(s, 'const') + def getgraph(f, argtypes): from rpython.translator.translator import TranslationContext, graphof diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -9,6 +9,8 @@ from subprocess import PIPE, Popen def run_subprocess(executable, args, env=None, cwd=None): + if isinstance(args, list): + args = [a.encode('latin1') for a in args] return _run(executable, args, env, cwd) shell_default = False From pypy.commits at gmail.com Tue Dec 29 20:30:35 2015 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 29 Dec 2015 17:30:35 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: Un-skip some appdirect tests: TP_FLAGS_DEFAULT is really really needed for all types since Python 2.2... 
Message-ID: <568333bb.d4811c0a.6ecfa.3621@mx.google.com> Author: Amaury Forgeot d'Arc Branch: cpyext-ext Changeset: r81496:8520e2b4b038 Date: 2015-12-30 01:36 +0100 http://bitbucket.org/pypy/pypy/changeset/8520e2b4b038/ Log: Un-skip some appdirect tests: TP_FLAGS_DEFAULT is really really needed for all types since Python 2.2... diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -92,7 +92,7 @@ res = generic_cpy_call(space, func_inquiry, w_self) res = rffi.cast(lltype.Signed, res) if res == -1: - space.fromcache(State).check_and_raise_exception() + space.fromcache(State).check_and_raise_exception(always=True) return space.wrap(bool(res)) def wrap_getattr(space, w_self, w_args, func): diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -502,12 +502,11 @@ assert module.tp_str(C()) == "text" def test_mp_ass_subscript(self): - if self.runappdirect: - py.test.xfail('segfault') module = self.import_extension('foo', [ ("new_obj", "METH_NOARGS", ''' PyObject *obj; + Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT; Foo_Type.tp_as_mapping = &tp_as_mapping; tp_as_mapping.mp_ass_subscript = mp_ass_subscript; if (PyType_Ready(&Foo_Type) < 0) return NULL; @@ -537,12 +536,11 @@ assert res is None def test_sq_contains(self): - if self.runappdirect: - py.test.xfail('segfault') module = self.import_extension('foo', [ ("new_obj", "METH_NOARGS", ''' PyObject *obj; + Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT; Foo_Type.tp_as_sequence = &tp_as_sequence; tp_as_sequence.sq_contains = sq_contains; if (PyType_Ready(&Foo_Type) < 0) return NULL; @@ -596,18 +594,17 @@ raises(StopIteration, module.tp_iternext, it) def test_bool(self): - if self.runappdirect: - py.test.xfail('segfault') module = self.import_extension('foo', [ ("newInt", "METH_VARARGS", """ IntLikeObject *intObj; - long intval; + int intval; PyObject *name; if (!PyArg_ParseTuple(args, "i", &intval)) return NULL; + IntLike_Type.tp_flags |= Py_TPFLAGS_DEFAULT; IntLike_Type.tp_as_number = &intlike_as_number; intlike_as_number.nb_nonzero = intlike_nb_nonzero; if (PyType_Ready(&IntLike_Type) < 0) return NULL; @@ -633,6 +630,7 @@ PyErr_SetNone(PyExc_ValueError); return -1; } + /* Returning -1 should be for exceptions only! 
*/ return v->value; } @@ -646,12 +644,10 @@ """) assert not bool(module.newInt(0)) assert bool(module.newInt(1)) - assert bool(module.newInt(-1)) + raises(SystemError, bool, module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) def test_binaryfunc(self): - if self.runappdirect: - py.test.xfail('segfault') module = self.import_extension('foo', [ ("newInt", "METH_VARARGS", """ @@ -662,7 +658,7 @@ return NULL; IntLike_Type.tp_as_number = &intlike_as_number; - IntLike_Type.tp_flags |= Py_TPFLAGS_CHECKTYPES; + IntLike_Type.tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES; intlike_as_number.nb_add = intlike_nb_add; if (PyType_Ready(&IntLike_Type) < 0) return NULL; intObj = PyObject_New(IntLikeObject, &IntLike_Type); @@ -681,7 +677,7 @@ if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; - IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; + IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES; if (PyType_Ready(&IntLike_Type_NoOp) < 0) return NULL; intObjNoOp = PyObject_New(IntLikeObjectNoOp, &IntLike_Type_NoOp); if (!intObjNoOp) { From pypy.commits at gmail.com Tue Dec 29 20:30:39 2015 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 29 Dec 2015 17:30:39 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: Fix a segfault with -A: a metatype always creates heap types. Message-ID: <568333bf.08e11c0a.a630e.61a8@mx.google.com> Author: Amaury Forgeot d'Arc Branch: cpyext-ext Changeset: r81498:ead9f50764bf Date: 2015-12-30 02:28 +0100 http://bitbucket.org/pypy/pypy/changeset/ead9f50764bf/ Log: Fix a segfault with -A: a metatype always creates heap types. diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -378,7 +378,7 @@ PyObject_HEAD_INIT(NULL) 0, "foo.Meta", - sizeof(PyTypeObject), /*tp_basicsize*/ + sizeof(PyHeapTypeObject),/*tp_basicsize*/ 0, /*tp_itemsize*/ 0, /*tp_dealloc*/ 0, /*tp_print*/ From pypy.commits at gmail.com Tue Dec 29 20:30:37 2015 From: pypy.commits at gmail.com (amauryfa) Date: Tue, 29 Dec 2015 17:30:37 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: Fix appdirect test: CPython does not have a dictionary for C types. Message-ID: <568333bd.84e31c0a.fd288.ffffd285@mx.google.com> Author: Amaury Forgeot d'Arc Branch: cpyext-ext Changeset: r81497:5f5896c1b060 Date: 2015-12-30 01:43 +0100 http://bitbucket.org/pypy/pypy/changeset/5f5896c1b060/ Log: Fix appdirect test: CPython does not have a dictionary for C types. 
(cpyext should maybe do the same, but this is another story) diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -150,6 +150,7 @@ if (v == -1 && PyErr_Occurred()) return -1; self->foo = v; + return 0; } return PyObject_GenericSetAttr((PyObject *)self, name, value); } From pypy.commits at gmail.com Wed Dec 30 06:51:35 2015 From: pypy.commits at gmail.com (rlamy) Date: Wed, 30 Dec 2015 03:51:35 -0800 (PST) Subject: [pypy-commit] pypy exctrans: create gc header during databasing instead of during sourcing Message-ID: <5683c547.0c2e1c0a.56608.585f@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81500:adc26e26c112 Date: 2015-12-30 04:14 +0100 http://bitbucket.org/pypy/pypy/changeset/adc26e26c112/ Log: create gc header during databasing instead of during sourcing diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -539,7 +539,17 @@ class StructNode(ContainerNode): nodekind = 'struct' if USESLOTS: - __slots__ = () + __slots__ = ('gc_init',) + + def __init__(self, db, T, obj): + ContainerNode.__init__(self, db, T, obj) + if needs_gcheader(T): + gct = self.db.gctransformer + if gct is not None: + self.gc_init = gct.gcheader_initdata(self) + db.getcontainernode(self.gc_init) + else: + self.gc_init = None def basename(self): T = self.getTYPE() @@ -566,12 +576,7 @@ data = [] if needs_gcheader(T): - gct = defnode.db.gctransformer - if gct is not None: - gc_init = gct.gcheader_initdata(self) - else: - gc_init = None - data.append(('gcheader', gc_init)) + data.append(('gcheader', self.gc_init)) for name in defnode.fieldnames: data.append((name, getattr(self.obj, name))) @@ -664,7 +669,17 @@ class ArrayNode(ContainerNode): nodekind = 'array' if USESLOTS: - __slots__ = () + __slots__ = ('gc_init',) + + def __init__(self, db, T, obj): + ContainerNode.__init__(self, db, T, obj) + if needs_gcheader(T): + gct = self.db.gctransformer + if gct is not None: + self.gc_init = gct.gcheader_initdata(self) + db.getcontainernode(self.gc_init) + else: + self.gc_init = None def getptrname(self): if barebonearray(self.getTYPE()): @@ -684,12 +699,7 @@ T = self.getTYPE() yield '{' if needs_gcheader(T): - gct = self.db.gctransformer - if gct is not None: - gc_init = gct.gcheader_initdata(self) - else: - gc_init = None - lines = generic_initializationexpr(self.db, gc_init, 'gcheader', + lines = generic_initializationexpr(self.db, self.gc_init, 'gcheader', '%sgcheader' % (decoration,)) for line in lines: yield line From pypy.commits at gmail.com Wed Dec 30 06:51:33 2015 From: pypy.commits at gmail.com (rlamy) Date: Wed, 30 Dec 2015 03:51:33 -0800 (PST) Subject: [pypy-commit] pypy exctrans: Move *_gcheader_initdata() methods from gcpolicy to gctransformer Message-ID: <5683c545.8a75c20a.13c37.686b@mx.google.com> Author: Ronan Lamy Branch: exctrans Changeset: r81499:925913cbec05 Date: 2015-12-30 03:31 +0100 http://bitbucket.org/pypy/pypy/changeset/925913cbec05/ Log: Move *_gcheader_initdata() methods from gcpolicy to gctransformer diff --git a/rpython/memory/gctransform/boehm.py b/rpython/memory/gctransform/boehm.py --- a/rpython/memory/gctransform/boehm.py +++ b/rpython/memory/gctransform/boehm.py @@ -74,7 +74,7 @@ def gct_fv_gc_malloc_varsize(self, hop, flags, TYPE, v_length, c_const_size, c_item_size, c_offset_to_length): - # XXX same behavior for zero=True: in theory that's wrong + # XXX same behavior for zero=True: in 
theory that's wrong if c_offset_to_length is None: v_raw = hop.genop("direct_call", [self.malloc_varsize_no_length_ptr, v_length, @@ -156,6 +156,11 @@ resulttype = lltype.Signed) hop.genop('int_invert', [v_int], resultvar=hop.spaceop.result) + def gcheader_initdata(self, defnode): + hdr = lltype.malloc(self.HDR, immortal=True) + hdr.hash = lltype.identityhash_nocache(defnode.obj._as_ptr()) + return hdr._obj + ########## weakrefs ########## # Boehm: weakref objects are small structures containing only a Boehm diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -292,7 +292,6 @@ s_gcref = SomePtr(llmemory.GCREF) gcdata = self.gcdata - translator = self.translator #use the GC flag to find which malloc method to use #malloc_zero_filled == Ture -> malloc_fixedsize/varsize_clear #malloc_zero_filled == Flase -> malloc_fixedsize/varsize @@ -326,7 +325,7 @@ GCClass.malloc_varsize.im_func, [s_gc, s_typeid16] + [annmodel.SomeInteger(nonneg=True) for i in range(4)], s_gcref) - + self.collect_ptr = getfn(GCClass.collect.im_func, [s_gc, annmodel.SomeInteger()], annmodel.s_None) self.can_move_ptr = getfn(GCClass.can_move.im_func, @@ -1389,7 +1388,7 @@ [v] + previous_steps + [c_name, c_null]) else: llops.genop('bare_setfield', [v, c_name, c_null]) - + return elif isinstance(TYPE, lltype.Array): ITEM = TYPE.OF @@ -1416,6 +1415,25 @@ resulttype=llmemory.Address) llops.genop('raw_memclear', [v_adr, v_totalsize]) + def gcheader_initdata(self, defnode): + o = lltype.top_container(defnode.obj) + needs_hash = self.get_prebuilt_hash(o) is not None + hdr = self.gc_header_for(o, needs_hash) + return hdr._obj + + def get_prebuilt_hash(self, obj): + # for prebuilt objects that need to have their hash stored and + # restored. Note that only structures that are StructNodes all + # the way have their hash stored (and not e.g. structs with var- + # sized arrays at the end). 'obj' must be the top_container. 
+ TYPE = lltype.typeOf(obj) + if not isinstance(TYPE, lltype.GcStruct): + return None + if TYPE._is_varsize(): + return None + return getattr(obj, '_hash_cache_', None) + + class TransformerLayoutBuilder(gctypelayout.TypeLayoutBuilder): diff --git a/rpython/memory/gctransform/refcounting.py b/rpython/memory/gctransform/refcounting.py --- a/rpython/memory/gctransform/refcounting.py +++ b/rpython/memory/gctransform/refcounting.py @@ -285,3 +285,7 @@ resulttype=llmemory.Address) hop.genop("direct_call", [self.identityhash_ptr, v_adr], resultvar=hop.spaceop.result) + + def gcheader_initdata(self, defnode): + top = lltype.top_container(defnode.obj) + return self.gcheaderbuilder.header_of_object(top)._obj diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -378,6 +378,10 @@ return hop.cast_result(rmodel.inputconst(lltype.Ptr(ARRAY_TYPEID_MAP), lltype.nullptr(ARRAY_TYPEID_MAP))) + def get_prebuilt_hash(self, obj): + return None + + class MinimalGCTransformer(BaseGCTransformer): def __init__(self, parenttransformer): BaseGCTransformer.__init__(self, parenttransformer.translator) diff --git a/rpython/translator/c/gc.py b/rpython/translator/c/gc.py --- a/rpython/translator/c/gc.py +++ b/rpython/translator/c/gc.py @@ -1,8 +1,7 @@ import sys from rpython.flowspace.model import Constant -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import (typeOf, RttiStruct, - RuntimeTypeInfo, top_container) +from rpython.rtyper.lltypesystem.lltype import (RttiStruct, + RuntimeTypeInfo) from rpython.translator.c.node import ContainerNode from rpython.translator.c.support import cdecl from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -18,23 +17,12 @@ return defnode.db.gctransformer.HDR return None - def common_gcheader_initdata(self, defnode): - if defnode.db.gctransformer is not None: - raise NotImplementedError - return None - def struct_gcheader_definition(self, defnode): return self.common_gcheader_definition(defnode) - def struct_gcheader_initdata(self, defnode): - return self.common_gcheader_initdata(defnode) - def array_gcheader_definition(self, defnode): return self.common_gcheader_definition(defnode) - def array_gcheader_initdata(self, defnode): - return self.common_gcheader_initdata(defnode) - def compilation_info(self): if not self.db: return ExternalCompilationInfo() @@ -46,9 +34,6 @@ ] ) - def get_prebuilt_hash(self, obj): - return None - def need_no_typeptr(self): return False @@ -113,13 +98,6 @@ from rpython.memory.gctransform import refcounting return refcounting.RefcountingGCTransformer(translator) - def common_gcheader_initdata(self, defnode): - if defnode.db.gctransformer is not None: - gct = defnode.db.gctransformer - top = top_container(defnode.obj) - return gct.gcheaderbuilder.header_of_object(top)._obj - return None - # for structs def struct_setup(self, structdefnode, rtti): @@ -201,13 +179,6 @@ from rpython.memory.gctransform import boehm return boehm.BoehmGCTransformer(translator) - def common_gcheader_initdata(self, defnode): - if defnode.db.gctransformer is not None: - hdr = lltype.malloc(defnode.db.gctransformer.HDR, immortal=True) - hdr.hash = lltype.identityhash_nocache(defnode.obj._as_ptr()) - return hdr._obj - return None - def array_setup(self, arraydefnode): pass @@ -362,24 +333,6 @@ args = [funcgen.expr(v) for v in op.args] return '%s = %s; /* for moving GCs */' % (args[1], args[0]) - 
def common_gcheader_initdata(self, defnode): - o = top_container(defnode.obj) - needs_hash = self.get_prebuilt_hash(o) is not None - hdr = defnode.db.gctransformer.gc_header_for(o, needs_hash) - return hdr._obj - - def get_prebuilt_hash(self, obj): - # for prebuilt objects that need to have their hash stored and - # restored. Note that only structures that are StructNodes all - # the way have their hash stored (and not e.g. structs with var- - # sized arrays at the end). 'obj' must be the top_container. - TYPE = typeOf(obj) - if not isinstance(TYPE, lltype.GcStruct): - return None - if TYPE._is_varsize(): - return None - return getattr(obj, '_hash_cache_', None) - def need_no_typeptr(self): config = self.db.translator.config return config.translation.gcremovetypeptr diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -566,7 +566,11 @@ data = [] if needs_gcheader(T): - gc_init = self.db.gcpolicy.struct_gcheader_initdata(self) + gct = defnode.db.gctransformer + if gct is not None: + gc_init = gct.gcheader_initdata(self) + else: + gc_init = None data.append(('gcheader', gc_init)) for name in defnode.fieldnames: @@ -640,7 +644,7 @@ def implementation(self): hash_typename = self.get_hash_typename() - hash = self.db.gcpolicy.get_prebuilt_hash(self.obj) + hash = self.db.gctransformer.get_prebuilt_hash(self.obj) assert hash is not None lines = list(self.initializationexpr()) lines.insert(0, '%s = { {' % ( @@ -650,7 +654,7 @@ return lines def gcstructnode_factory(db, T, obj): - if db.gcpolicy.get_prebuilt_hash(obj) is not None: + if db.gctransformer.get_prebuilt_hash(obj) is not None: cls = GcStructNodeWithHash else: cls = StructNode @@ -680,7 +684,11 @@ T = self.getTYPE() yield '{' if needs_gcheader(T): - gc_init = self.db.gcpolicy.array_gcheader_initdata(self) + gct = self.db.gctransformer + if gct is not None: + gc_init = gct.gcheader_initdata(self) + else: + gc_init = None lines = generic_initializationexpr(self.db, gc_init, 'gcheader', '%sgcheader' % (decoration,)) for line in lines: From pypy.commits at gmail.com Wed Dec 30 10:32:05 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 30 Dec 2015 07:32:05 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: wrong operation (SG instead of STG) lead to substraction instead of 64bit store, test_exception is now passing! Message-ID: <5683f8f5.e935c20a.161dc.ffffbb1f@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81501:79c8712b54e0 Date: 2015-12-30 16:31 +0100 http://bitbucket.org/pypy/pypy/changeset/79c8712b54e0/ Log: wrong operation (SG instead of STG) lead to substraction instead of 64bit store, test_exception is now passing! 
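A side note on the fix described in the log message above (and visible in the diff below): on z/Architecture, SG is a 64-bit subtract whose second operand is fetched from memory, so it modifies the register and leaves memory untouched, while STG stores the 64-bit register into memory, which is what resetting the exception fields requires. The following toy Python model illustrates the difference; it is not PyPy code, and the address and register names are invented for the example.

def sg(regs, mem, reg, addr):
    # SG: 64-bit subtract -- the register changes, memory is left untouched.
    regs[reg] -= mem[addr]

def stg(regs, mem, reg, addr):
    # STG: 64-bit store -- the register value is written into memory.
    mem[addr] = regs[reg]

mem = {0x1000: 0xdeadbeef}        # stands in for the pos_exc_value() word
regs = {"SCRATCH2": 0}            # after LGHI r.SCRATCH2, 0

sg(dict(regs), mem, "SCRATCH2", 0x1000)
assert mem[0x1000] == 0xdeadbeef  # field not cleared: the behaviour of the old code

stg(regs, mem, "SCRATCH2", 0x1000)
assert mem[0x1000] == 0           # field cleared: what the corrected STG sequence does
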
diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -65,7 +65,6 @@ def execute_operations(self, inputargs, operations, result_type): looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) - #import pdb; pdb.set_trace() args = [] for box in inputargs: if box.type == 'i': @@ -4980,7 +4979,6 @@ def test_increment_debug_counter(self): foo = lltype.malloc(rffi.CArray(lltype.Signed), 1, flavor='raw') foo[0] = 1789200 - print "addr" , hex(rffi.cast(lltype.Signed, foo)) self.execute_operation(rop.INCREMENT_DEBUG_COUNTER, [ConstInt(rffi.cast(lltype.Signed, foo))], 'void') @@ -5006,12 +5004,11 @@ addr = llmemory.cast_ptr_to_adr(a) a_int = heaptracker.adr2int(addr) a_ref = lltype.cast_opaque_ptr(llmemory.GCREF, a) - for (start, length) in [(0,100), (49, 49), (1, 98), + for (start, length) in [(0, 100), (49, 49), (1, 98), (15, 9), (10, 10), (47, 0), (0, 4)]: for cls1 in [ConstInt, InputArgInt]: for cls2 in [ConstInt, InputArgInt]: - print 'ptr:', hex(rffi.cast(lltype.Signed, a_ref)) print 'a_int:', a_int print 'of:', OF print 'start:', cls1.__name__, start @@ -5049,10 +5046,6 @@ scalebox = ConstInt(arraydescr.itemsize) inputargs, oplist = self._get_operation_list(ops,'void') - # XXX - print("input:", inputargs) - for op in oplist: - print(op) self.execute_operations(inputargs, oplist, 'void') assert len(a) == 100 for i in range(100): diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -295,9 +295,9 @@ # f) store the address of the new jitframe in the shadowstack # c) set the gcmap field to 0 in the new jitframe # g) restore registers and return + mc = InstrBuilder() + self.mc = mc return - mc = PPCBuilder() - self.mc = mc # signature of this _frame_realloc_slowpath function: # * on entry, r0 is the new size @@ -305,13 +305,13 @@ # * no managed register must be modified ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.store(r.r2.value, r.SPP.value, ofs2) + mc.STG(r.r2, l.addr(ofs2, r.SPP)) self._push_core_regs_to_jitframe(mc) self._push_fp_regs_to_jitframe(mc) # Save away the LR inside r30 - mc.mflr(r.RCS1.value) + #mc.mflr(r.RCS1.value) # First argument is SPP (= r31), which is the jitframe mc.mr(r.r3.value, r.SPP.value) @@ -493,6 +493,7 @@ ofs = self.cpu.unpack_fielddescr(descrs.arraydescr.lendescr) #mc.LG(r.r2, l.addr(ofs, r.SPP)) patch_pos = mc.currpos() + self.mc.trap() #mc.TRAP2() # placeholder for cmpdi(0, r2, ...) #mc.TRAP2() # placeholder for bge #mc.TRAP2() # placeholder for li(r0, ...) @@ -664,8 +665,8 @@ # if self.propagate_exception_path == 0 (tests), this may jump to 0 # and segfaults. too bad. the alternative is to continue anyway # with r2==0, but that will segfault too. 
+ self.mc.load_imm(r.RETURN, self.propagate_exception_path) self.mc.cmp_op(r.r2, l.imm(0), imm=True) - self.mc.load_imm(r.RETURN, self.propagate_exception_path) self.mc.BCR(c.EQ, r.RETURN) def regalloc_push(self, loc, already_pushed): @@ -843,6 +844,7 @@ for traps_pos, jmp_target in self.frame_depth_to_patch: pmc = OverwritingBuilder(self.mc, traps_pos, 3) # three traps, so exactly three instructions to patch here + xxx #pmc.cmpdi(0, r.r2.value, frame_depth) # 1 #pmc.bc(7, 0, jmp_target - (traps_pos + 4)) # 2 "bge+" #pmc.li(r.r0.value, frame_depth) # 3 @@ -1111,6 +1113,158 @@ ptr = rffi.cast(lltype.Signed, gcmap) mc.load_imm(reg, ptr) + def malloc_cond_varsize_frame(self, nursery_free_adr, nursery_top_adr, + sizeloc, gcmap): + xxx + diff = nursery_top_adr - nursery_free_adr + assert _check_imm_arg(diff) + mc = self.mc + mc.load_imm(r.r2, nursery_free_adr) + + if sizeloc is r.RES: + mc.mr(r.RSZ.value, r.RES.value) + sizeloc = r.RSZ + + mc.load(r.RES.value, r.r2.value, 0) # load nursery_free + mc.load(r.SCRATCH.value, r.r2.value, diff) # load nursery_top + + mc.add(r.RSZ.value, r.RES.value, sizeloc.value) + + mc.cmp_op(0, r.RSZ.value, r.SCRATCH.value, signed=False) + + fast_jmp_pos = mc.currpos() + mc.trap() # conditional jump, patched later + + # new value of nursery_free_adr in RSZ and the adr of the new object + # in RES. + self.load_gcmap(mc, r.r2, gcmap) + mc.bl_abs(self.malloc_slowpath) + + offset = mc.currpos() - fast_jmp_pos + pmc = OverwritingBuilder(mc, fast_jmp_pos, 1) + pmc.bc(7, 1, offset) # jump if LE (not GT), predicted to be true + pmc.overwrite() + + mc.store(r.RSZ.value, r.r2.value, 0) # store into nursery_free + + def malloc_cond_varsize(self, kind, nursery_free_adr, nursery_top_adr, + lengthloc, itemsize, maxlength, gcmap, + arraydescr): + xxx + from rpython.jit.backend.llsupport.descr import ArrayDescr + assert isinstance(arraydescr, ArrayDescr) + + # lengthloc is the length of the array, which we must not modify! + assert lengthloc is not r.RES and lengthloc is not r.RSZ + assert lengthloc.is_reg() + + if maxlength > 2**16-1: + maxlength = 2**16-1 # makes things easier + mc = self.mc + mc.cmp_op(0, lengthloc.value, maxlength, imm=True, signed=False) + + jmp_adr0 = mc.currpos() + mc.trap() # conditional jump, patched later + + # ------------------------------------------------------------ + # block of code for the case: the length is <= maxlength + + diff = nursery_top_adr - nursery_free_adr + assert _check_imm_arg(diff) + mc.load_imm(r.r2, nursery_free_adr) + + varsizeloc = self._multiply_by_constant(lengthloc, itemsize, + r.RSZ) + # varsizeloc is either RSZ here, or equal to lengthloc if + # itemsize == 1. It is the size of the variable part of the + # array, in bytes. 
+ + mc.load(r.RES.value, r.r2.value, 0) # load nursery_free + mc.load(r.SCRATCH.value, r.r2.value, diff) # load nursery_top + + assert arraydescr.basesize >= self.gc_minimal_size_in_nursery + constsize = arraydescr.basesize + self.gc_size_of_header + force_realignment = (itemsize % WORD) != 0 + if force_realignment: + constsize += WORD - 1 + mc.addi(r.RSZ.value, varsizeloc.value, constsize) + if force_realignment: + # "& ~(WORD-1)" + bit_limit = 60 if WORD == 8 else 61 + mc.rldicr(r.RSZ.value, r.RSZ.value, 0, bit_limit) + + mc.add(r.RSZ.value, r.RES.value, r.RSZ.value) + # now RSZ contains the total size in bytes, rounded up to a multiple + # of WORD, plus nursery_free_adr + + mc.cmp_op(0, r.RSZ.value, r.SCRATCH.value, signed=False) + + jmp_adr1 = mc.currpos() + mc.trap() # conditional jump, patched later + + # ------------------------------------------------------------ + # block of code for two cases: either the length is > maxlength + # (jump from jmp_adr0), or the length is small enough but there + # is not enough space in the nursery (fall-through) + # + offset = mc.currpos() - jmp_adr0 + pmc = OverwritingBuilder(mc, jmp_adr0, 1) + pmc.bgt(offset) # jump if GT + pmc.overwrite() + # + # save the gcmap + self.load_gcmap(mc, r.r2, gcmap) + # + # load the function to call into CTR + if kind == rewrite.FLAG_ARRAY: + addr = self.malloc_slowpath_varsize + elif kind == rewrite.FLAG_STR: + addr = self.malloc_slowpath_str + elif kind == rewrite.FLAG_UNICODE: + addr = self.malloc_slowpath_unicode + else: + raise AssertionError(kind) + mc.load_imm(r.SCRATCH, addr) + mc.mtctr(r.SCRATCH.value) + # + # load the argument(s) + if kind == rewrite.FLAG_ARRAY: + mc.mr(r.RSZ.value, lengthloc.value) + mc.load_imm(r.RES, itemsize) + mc.load_imm(r.SCRATCH, arraydescr.tid) + else: + mc.mr(r.RES.value, lengthloc.value) + # + # call! 
+ mc.bctrl() + + jmp_location = mc.currpos() + mc.trap() # jump forward, patched later + + # ------------------------------------------------------------ + # block of code for the common case: the length is <= maxlength + # and there is enough space in the nursery + + offset = mc.currpos() - jmp_adr1 + pmc = OverwritingBuilder(mc, jmp_adr1, 1) + pmc.ble(offset) # jump if LE + pmc.overwrite() + # + # write down the tid, but only in this case (not in other cases + # where r.RES is the result of the CALL) + mc.load_imm(r.SCRATCH, arraydescr.tid) + mc.store(r.SCRATCH.value, r.RES.value, 0) + # while we're at it, this line is not needed if we've done the CALL + mc.store(r.RSZ.value, r.r2.value, 0) # store into nursery_free + + # ------------------------------------------------------------ + + offset = mc.currpos() - jmp_location + pmc = OverwritingBuilder(mc, jmp_location, 1) + pmc.b(offset) # jump always + pmc.overwrite() + + def notimplemented_op(asm, op, arglocs, regalloc): print "[ZARCH/asm] %s not implemented" % op.getopname() raise NotImplementedError(op) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -746,8 +746,8 @@ if resloc: mc.load(resloc, r.SCRATCH, 0) mc.LGHI(r.SCRATCH2, l.imm(0)) - mc.SG(r.SCRATCH2, l.addr(0, r.SCRATCH)) - mc.SG(r.SCRATCH2, l.addr(diff, r.SCRATCH)) + mc.STG(r.SCRATCH2, l.addr(0, r.SCRATCH)) + mc.STG(r.SCRATCH2, l.addr(diff, r.SCRATCH)) def emit_save_exc_class(self, op, arglocs, regalloc): [resloc] = arglocs From pypy.commits at gmail.com Wed Dec 30 10:52:46 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 30 Dec 2015 07:52:46 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: save/restore/reset exception is now working as expected by the test, overwrote value in a register that was passed to the vm Message-ID: <5683fdce.e251c20a.14fea.ffffdaac@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81502:afb077bff965 Date: 2015-12-30 16:51 +0100 http://bitbucket.org/pypy/pypy/changeset/afb077bff965/ Log: save/restore/reset exception is now working as expected by the test, overwrote value in a register that was passed to the vm diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1088,25 +1088,25 @@ def _store_and_reset_exception(self, mc, excvalloc, exctploc=None): """Reset the exception, after fetching it inside the two regs. 
""" - mc.load_imm(r.r2, self.cpu.pos_exc_value()) + mc.load_imm(r.SCRATCH, self.cpu.pos_exc_value()) diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() assert check_imm_value(diff) # Load the exception fields into the two registers - mc.load(excvalloc, r.r2, 0) + mc.load(excvalloc, r.SCRATCH, 0) if exctploc is not None: - mc.load(exctploc, r.r2, diff) + mc.load(exctploc, r.SCRATCH, diff) # Zero out the exception fields - mc.LGHI(r.r0, l.imm(0)) - mc.STG(r.r0, l.addr(0, r.r2)) - mc.STG(r.r0, l.addr(diff, r.r2)) + mc.LGHI(r.SCRATCH2, l.imm(0)) + mc.STG(r.SCRATCH2, l.addr(0, r.SCRATCH)) + mc.STG(r.SCRATCH2, l.addr(diff, r.SCRATCH)) def _restore_exception(self, mc, excvalloc, exctploc): - mc.load_imm(r.r2, self.cpu.pos_exc_value()) + mc.load_imm(r.SCRATCH, self.cpu.pos_exc_value()) diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() assert check_imm_value(diff) # Store the exception fields from the two registers - mc.STG(excvalloc, l.addr(0, r.r2)) - mc.STG(exctploc, l.addr(diff, r.r2)) + mc.STG(excvalloc, l.addr(0, r.SCRATCH)) + mc.STG(exctploc, l.addr(diff, r.SCRATCH)) def load_gcmap(self, mc, reg, gcmap): # load the current gcmap into register 'reg' diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -751,8 +751,8 @@ def emit_save_exc_class(self, op, arglocs, regalloc): [resloc] = arglocs - diff = self.mc.load_imm_plus(r.r2, self.cpu.pos_exception()) - self.mc.load(resloc, r.r2, diff) + diff = self.mc.load_imm_plus(r.SCRATCH, self.cpu.pos_exception()) + self.mc.load(resloc, r.SCRATCH, diff) def emit_save_exception(self, op, arglocs, regalloc): [resloc] = arglocs From pypy.commits at gmail.com Wed Dec 30 11:46:13 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 30 Dec 2015 08:46:13 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed an edge case: s390x's native instruction for memset can return in the middle of the copy (determined by the cpu), added a loop to ensure all bytes are copied Message-ID: <56840a55.87591c0a.d290e.ffffe227@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81503:7288aa624ed6 Date: 2015-12-30 17:45 +0100 http://bitbucket.org/pypy/pypy/changeset/7288aa624ed6/ Log: fixed an edge case: s390x's native instruction for memset can return in the middle of the copy (determined by the cpu), added a loop to ensure all bytes are copied diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -7,7 +7,8 @@ from rpython.jit.backend.zarch import registers as r from rpython.jit.backend.zarch import locations as l from rpython.jit.backend.zarch.pool import LiteralPool -from rpython.jit.backend.zarch.codebuilder import InstrBuilder +from rpython.jit.backend.zarch.codebuilder import (InstrBuilder, + OverwritingBuilder) from rpython.jit.backend.zarch.helper.regalloc import check_imm_value from rpython.jit.backend.zarch.registers import JITFRAME_FIXED_SIZE from rpython.jit.backend.zarch.regalloc import ZARCHRegisterManager @@ -493,7 +494,8 @@ ofs = self.cpu.unpack_fielddescr(descrs.arraydescr.lendescr) #mc.LG(r.r2, l.addr(ofs, r.SPP)) patch_pos = mc.currpos() - self.mc.trap() + # XXX TODO + #self.mc.trap() #mc.TRAP2() # placeholder for cmpdi(0, r2, ...) #mc.TRAP2() # placeholder for bge #mc.TRAP2() # placeholder for li(r0, ...) 
@@ -844,7 +846,6 @@ for traps_pos, jmp_target in self.frame_depth_to_patch: pmc = OverwritingBuilder(self.mc, traps_pos, 3) # three traps, so exactly three instructions to patch here - xxx #pmc.cmpdi(0, r.r2.value, frame_depth) # 1 #pmc.bc(7, 0, jmp_target - (traps_pos + 4)) # 2 "bge+" #pmc.li(r.r0.value, frame_depth) # 3 diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -277,7 +277,7 @@ ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') self.mc.load_imm(r.SCRATCH, rffi.cast(lltype.Signed, cast_instance_to_gcref(faildescr))) - self.mc.STD(r.SCRATCH, l.addr(ofs, r.SPP)) + self.mc.STG(r.SCRATCH, l.addr(ofs, r.SPP)) def _find_nearby_operation(self, regalloc, delta): return regalloc.operations[regalloc.rm.position + delta] @@ -927,19 +927,22 @@ def emit_zero_array(self, op, arglocs, regalloc): base_loc, startindex_loc, length_loc, \ ofs_loc, itemsize_loc, pad_byte_loc = arglocs + print(op, arglocs) if ofs_loc.is_imm(): + assert check_imm_value(ofs_loc.value) self.mc.AGHI(base_loc, ofs_loc) else: self.mc.AGR(base_loc, ofs_loc) if startindex_loc.is_imm(): + assert check_imm_value(startindex_loc.value) self.mc.AGHI(base_loc, startindex_loc) else: self.mc.AGR(base_loc, startindex_loc) assert not length_loc.is_imm() - self.mc.SGR(pad_byte_loc, pad_byte_loc) - pad_byte_plus_one = r.odd_reg(pad_byte_loc) - self.mc.SGR(pad_byte_plus_one, pad_byte_plus_one) + self.mc.XGR(pad_byte_loc, pad_byte_loc) + pad_plus = r.odd_reg(pad_byte_loc) + self.mc.XGR(pad_plus, pad_plus) self.mc.XGR(r.SCRATCH, r.SCRATCH) # s390x has memset directly as a hardware instruction!! # it needs 5 registers allocated @@ -947,9 +950,15 @@ # pad_byte is rY to rY+1 # scratch register holds the value written to dst assert pad_byte_loc.is_even() + assert pad_plus.value == pad_byte_loc.value + 1 assert base_loc.is_even() assert length_loc.value == base_loc.value + 1 + assert base_loc.value != pad_byte_loc.value + # NOTE this instruction can (determined by the cpu), just + # quit the movement any time, thus it is looped until all bytes + # are copied! self.mc.MVCLE(base_loc, pad_byte_loc, l.addr(0, r.SCRATCH)) + self.mc.BCR(c.OF, l.imm(-self.mc.MVCLE_byte_count)) class ForceOpAssembler(object): From pypy.commits at gmail.com Wed Dec 30 11:48:24 2015 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 30 Dec 2015 08:48:24 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: BRC is not BCR!! Message-ID: <56840ad8.e686c20a.322f2.ffffd461@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81504:0d5b4291c580 Date: 2015-12-30 17:47 +0100 http://bitbucket.org/pypy/pypy/changeset/0d5b4291c580/ Log: BRC is not BCR!! diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -958,7 +958,7 @@ # quit the movement any time, thus it is looped until all bytes # are copied! 
self.mc.MVCLE(base_loc, pad_byte_loc, l.addr(0, r.SCRATCH)) - self.mc.BCR(c.OF, l.imm(-self.mc.MVCLE_byte_count)) + self.mc.BRC(c.OF, l.imm(-self.mc.MVCLE_byte_count)) class ForceOpAssembler(object): From pypy.commits at gmail.com Thu Dec 31 03:50:52 2015 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 31 Dec 2015 00:50:52 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: implementing realloc frame Message-ID: <5684ec6c.6953c20a.10e25.ffffe6da@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81505:35dcb2c4de31 Date: 2015-12-31 09:50 +0100 http://bitbucket.org/pypy/pypy/changeset/35dcb2c4de31/ Log: implementing realloc frame diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -298,10 +298,9 @@ # g) restore registers and return mc = InstrBuilder() self.mc = mc - return # signature of this _frame_realloc_slowpath function: - # * on entry, r0 is the new size + # * on entry, r3 is the new size # * on entry, r2 is the gcmap # * no managed register must be modified @@ -311,38 +310,43 @@ self._push_core_regs_to_jitframe(mc) self._push_fp_regs_to_jitframe(mc) - # Save away the LR inside r30 - #mc.mflr(r.RCS1.value) + self.mc.store_link() # First argument is SPP (= r31), which is the jitframe - mc.mr(r.r3.value, r.SPP.value) + mc.LGR(r.r2, r.SPP) - # Second argument is the new size, which is still in r0 here - mc.mr(r.r4.value, r.r0.value) + # no need to move second argument (frame_depth), + # it is already in register r3! + + RCS2 = r.r10 + RCS3 = r.r12 # This trashes r0 and r2 - self._store_and_reset_exception(mc, r.RCS2, r.RCS3) + self._store_and_reset_exception(mc, RCS2, RCS3) # Do the call adr = rffi.cast(lltype.Signed, self.cpu.realloc_frame) + mc.push_std_frame() mc.load_imm(mc.RAW_CALL_REG, adr) mc.raw_call() + mc.pop_std_frame() # The result is stored back into SPP (= r31) - mc.mr(r.SPP.value, r.r3.value) + mc.LGR(r.SPP, r.r2) - self._restore_exception(mc, r.RCS2, r.RCS3) + self._restore_exception(mc, RCS2, RCS3) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: + xxx diff = mc.load_imm_plus(r.r5, gcrootmap.get_root_stack_top_addr()) mc.load(r.r5.value, r.r5.value, diff) mc.store(r.r3.value, r.r5.value, -WORD) - mc.mtlr(r.RCS1.value) # restore LR + mc.restore_link() self._pop_core_regs_from_jitframe(mc) self._pop_fp_regs_from_jitframe(mc) - mc.blr() + mc.BCR(c.ANY, r.RETURN) self._frame_realloc_slowpath = mc.materialize(self.cpu, []) self.mc = None @@ -492,17 +496,19 @@ """ descrs = self.cpu.gc_ll_descr.getframedescrs(self.cpu) ofs = self.cpu.unpack_fielddescr(descrs.arraydescr.lendescr) - #mc.LG(r.r2, l.addr(ofs, r.SPP)) + mc.LG(r.r2, l.addr(ofs, r.SPP)) patch_pos = mc.currpos() - # XXX TODO - #self.mc.trap() - #mc.TRAP2() # placeholder for cmpdi(0, r2, ...) - #mc.TRAP2() # placeholder for bge - #mc.TRAP2() # placeholder for li(r0, ...) - #mc.load_imm(r.SCRATCH2, self._frame_realloc_slowpath) - #mc.mtctr(r.SCRATCH2.value) - #self.load_gcmap(mc, r.r2, gcmap) - #mc.bctrl() + # placeholder for the following instructions + # CGRL r2, ... (6 bytes) + # BRC c, ... (4 bytes) + # LGHI r3, ... 
(4 bytes) + # sum -> (14 bytes) + mc.write('\x00'*14) + mc.load_imm(r.RETURN, self._frame_realloc_slowpath) + self.load_gcmap(mc, r.r2, gcmap) + self.mc.push_std_frame() + mc.BCR(c.ANY, r.RETURN) + self.mc.pop_std_frame() self.frame_depth_to_patch.append((patch_pos, mc.currpos())) @@ -846,10 +852,10 @@ for traps_pos, jmp_target in self.frame_depth_to_patch: pmc = OverwritingBuilder(self.mc, traps_pos, 3) # three traps, so exactly three instructions to patch here - #pmc.cmpdi(0, r.r2.value, frame_depth) # 1 - #pmc.bc(7, 0, jmp_target - (traps_pos + 4)) # 2 "bge+" - #pmc.li(r.r0.value, frame_depth) # 3 - #pmc.overwrite() + pmc.CGRL(r.r2, l.imm(frame_depth)) + pmc.BRC(c.EQ, jmp_target - (traps_pos + 6)) + pmc.LGHI(r.r3, frame_depth) + pmc.overwrite() def materialize_loop(self, looptoken): self.datablockwrapper.done() diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -10,6 +10,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.tool.udir import udir from rpython.jit.backend.detect_cpu import autodetect +from rpython.jit.backend.zarch.arch import WORD clear_cache = rffi.llexternal( "__clear_cache", @@ -197,11 +198,17 @@ """ self.BASR(r.RETURN, call_reg) - def alloc_std_frame(self): + def store_link(self): + self.STG(r.RETURN, l.addr(14*WORD, r.SP)) + + def restore_link(self): + self.LG(r.RETURN, l.addr(14*WORD, r.SP)) + + def push_std_frame(self): self.STG(r.SP, l.addr(-STD_FRAME_SIZE_IN_BYTES, r.SP)) self.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) - def restore_std_frame(self): + def pop_std_frame(self): self.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) class OverwritingBuilder(BlockBuilderMixin, AbstractZARCHBuilder): diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -919,10 +919,10 @@ self.mc.AGHI(r.r3, l.imm(basesize)) self.mc.AGHI(r.r2, l.imm(basesize)) - self.mc.alloc_std_frame() + self.mc.push_std_frame() self.mc.load_imm(self.mc.RAW_CALL_REG, self.memcpy_addr) self.mc.raw_call() - self.mc.restore_std_frame() + self.mc.pop_std_frame() def emit_zero_array(self, op, arglocs, regalloc): base_loc, startindex_loc, length_loc, \ From pypy.commits at gmail.com Thu Dec 31 05:04:43 2015 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 31 Dec 2015 02:04:43 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: realloc frame is nearly working, it seems though the token is not different from the frame? Message-ID: <5684fdbb.460f1c0a.66d1d.5ce1@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r81506:25e9fedba6e8 Date: 2015-12-31 11:03 +0100 http://bitbucket.org/pypy/pypy/changeset/25e9fedba6e8/ Log: realloc frame is nearly working, it seems though the token is not different from the frame? 
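One pattern worth spelling out from the realloc-frame work above (it also appears in the diff below): the frame-depth check first reserves a 14-byte hole (6 bytes for CGFI, 4 for BRC, 4 for LGHI) and records its position in frame_depth_to_patch, because the final frame depth is only known once the whole loop has been assembled; a later pass then overwrites the hole through an OverwritingBuilder. The sketch below is a simplified, non-PyPy illustration of that reserve-then-patch idea; all names and the fake "encoding" are stand-ins.

def encode_check(frame_depth):
    # Stand-in for the real 14-byte CGFI/BRC/LGHI sequence written over the hole.
    return "CGFI r2,%d; BRC EQ,<slowpath>; LGHI r3,%d" % (frame_depth, frame_depth)

class ToyAsm:
    def __init__(self):
        self.code = []                      # one entry per emitted chunk
        self.frame_depth_to_patch = []

    def check_frame_depth(self):
        # Reserve space now; the real frame depth is not known yet.
        self.frame_depth_to_patch.append(len(self.code))
        self.code.append("<14 bytes reserved>")

    def patch_frame_depth_checks(self, frame_depth):
        for pos in self.frame_depth_to_patch:
            self.code[pos] = encode_check(frame_depth)

asm = ToyAsm()
asm.check_frame_depth()
asm.code.append("... rest of the assembled loop ...")
asm.patch_frame_depth_checks(frame_depth=44)
assert asm.code[0].startswith("CGFI r2,44")
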
diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -321,7 +321,6 @@ RCS2 = r.r10 RCS3 = r.r12 - # This trashes r0 and r2 self._store_and_reset_exception(mc, RCS2, RCS3) # Do the call @@ -344,7 +343,8 @@ mc.store(r.r3.value, r.r5.value, -WORD) mc.restore_link() - self._pop_core_regs_from_jitframe(mc) + # do not restore r2, thus [1:] + self._pop_core_regs_from_jitframe(mc, r.MANAGED_REGS[1:]) self._pop_fp_regs_from_jitframe(mc) mc.BCR(c.ANY, r.RETURN) @@ -409,13 +409,9 @@ self._push_fp_regs_to_jitframe(mc) # allocate a stack frame! - mc.STG(r.SP, l.addr(-STD_FRAME_SIZE_IN_BYTES, r.SP)) # store the backchain - mc.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) - - # Do the call + mc.push_std_frame() mc.raw_call(r.r12) - - mc.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) + mc.pop_std_frame() # Finish self._reload_frame_if_necessary(mc) @@ -437,15 +433,14 @@ # registers). mc = InstrBuilder() # - mc.STG(r.r14, l.addr(14*WORD, r.SP)) + mc.trap() + # mc.STG(r.r14, l.addr(14*WORD, r.SP)) # Do the call - # use SP as single parameter for the call - mc.STG(r.SP, l.addr(0, r.SP)) # store the backchain - mc.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) + mc.push_std_frame() mc.LGR(r.r2, r.SP) mc.load_imm(mc.RAW_CALL_REG, slowpathaddr) mc.raw_call() - mc.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) + mc.pop_std_frame() # # Check if it raised StackOverflow mc.load_imm(r.SCRATCH, self.cpu.pos_exception()) @@ -455,7 +450,7 @@ mc.cmp_op(r.SCRATCH, 0, imm=True) # # So we return to our caller, conditionally if "EQ" - mc.LG(r.r14, l.addr(14*WORD, r.SP)) + # mc.LG(r.r14, l.addr(14*WORD, r.SP)) mc.BCR(c.EQ, r.r14) # # Else, jump to propagate_exception_path @@ -479,6 +474,7 @@ endaddr, lengthaddr, _ = self.cpu.insert_stack_check() diff = lengthaddr - endaddr assert check_imm_value(diff) + xxx mc = self.mc mc.load_imm(r.SCRATCH, self.stack_check_slowpath) @@ -499,15 +495,15 @@ mc.LG(r.r2, l.addr(ofs, r.SPP)) patch_pos = mc.currpos() # placeholder for the following instructions - # CGRL r2, ... (6 bytes) + # CGFI r2, ... (6 bytes) # BRC c, ... (4 bytes) # LGHI r3, ... 
(4 bytes) # sum -> (14 bytes) mc.write('\x00'*14) + self.mc.push_std_frame() mc.load_imm(r.RETURN, self._frame_realloc_slowpath) self.load_gcmap(mc, r.r2, gcmap) - self.mc.push_std_frame() - mc.BCR(c.ANY, r.RETURN) + mc.raw_call() self.mc.pop_std_frame() self.frame_depth_to_patch.append((patch_pos, mc.currpos())) @@ -852,9 +848,9 @@ for traps_pos, jmp_target in self.frame_depth_to_patch: pmc = OverwritingBuilder(self.mc, traps_pos, 3) # three traps, so exactly three instructions to patch here - pmc.CGRL(r.r2, l.imm(frame_depth)) - pmc.BRC(c.EQ, jmp_target - (traps_pos + 6)) - pmc.LGHI(r.r3, frame_depth) + pmc.CGFI(r.r2, l.imm(frame_depth)) + pmc.BRC(c.EQ, l.imm(jmp_target - (traps_pos + 6))) + pmc.LGHI(r.r3, l.imm(frame_depth)) pmc.overwrite() def materialize_loop(self, looptoken): @@ -910,11 +906,6 @@ relative_target)) def _call_header(self): - # Reserve space for a function descriptor, 3 words - #self.mc.write64(0) - #self.mc.write64(0) - #self.mc.write64(0) - # Build a new stackframe of size STD_FRAME_SIZE_IN_BYTES self.mc.STMG(r.r6, r.r15, l.addr(6*WORD, r.SP)) # save the back chain diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -206,10 +206,10 @@ def push_std_frame(self): self.STG(r.SP, l.addr(-STD_FRAME_SIZE_IN_BYTES, r.SP)) - self.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) + self.LAY(r.SP, l.addr(-STD_FRAME_SIZE_IN_BYTES, r.SP)) def pop_std_frame(self): - self.AGHI(r.SP, l.imm(STD_FRAME_SIZE_IN_BYTES)) + self.LAY(r.SP, l.addr(STD_FRAME_SIZE_IN_BYTES, r.SP)) class OverwritingBuilder(BlockBuilderMixin, AbstractZARCHBuilder): def __init__(self, mc, start, num_insts=0): From pypy.commits at gmail.com Thu Dec 31 05:43:41 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Dec 2015 02:43:41 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: hg merge default Message-ID: <568506dd.8a75c20a.13c37.ffffe21d@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2499:2075cb3f448f Date: 2015-12-29 14:33 +0100 http://bitbucket.org/cffi/cffi/changeset/2075cb3f448f/ Log: hg merge default diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -266,18 +266,7 @@ /* whenever running Python code, the errno is saved in this thread-local variable */ #ifndef MS_WIN32 -# ifdef USE__THREAD -/* This macro ^^^ is defined by setup.py if it finds that it is - syntactically valid to use "__thread" with this C compiler. 
*/ -static __thread int cffi_saved_errno = 0; -static void save_errno(void) { cffi_saved_errno = errno; } -static void restore_errno(void) { errno = cffi_saved_errno; } -static void init_errno(void) { } -# else -# include "misc_thread.h" -# endif -# define save_errno_only save_errno -# define restore_errno_only restore_errno +# include "misc_thread_posix.h" #endif #include "minibuffer.h" @@ -290,8 +279,11 @@ # include "wchar_helper.h" #endif -typedef PyObject *const cffi_allocator_t[3]; -static cffi_allocator_t default_allocator = { NULL, NULL, NULL }; +typedef struct _cffi_allocator_s { + PyObject *ca_alloc, *ca_free; + int ca_dont_clear; +} cffi_allocator_t; +static const cffi_allocator_t default_allocator = { NULL, NULL, 0 }; static PyObject *FFIError; static PyObject *unique_cache; @@ -3030,21 +3022,18 @@ static CDataObject *allocate_with_allocator(Py_ssize_t basesize, Py_ssize_t datasize, CTypeDescrObject *ct, - cffi_allocator_t allocator) + const cffi_allocator_t *allocator) { CDataObject *cd; - PyObject *my_alloc = allocator[0]; - PyObject *my_free = allocator[1]; - PyObject *dont_clear_after_alloc = allocator[2]; - - if (my_alloc == NULL) { /* alloc */ + + if (allocator->ca_alloc == NULL) { cd = allocate_owning_object(basesize + datasize, ct); if (cd == NULL) return NULL; cd->c_data = ((char *)cd) + basesize; } else { - PyObject *res = PyObject_CallFunction(my_alloc, "n", datasize); + PyObject *res = PyObject_CallFunction(allocator->ca_alloc, "n", datasize); if (res == NULL) return NULL; @@ -3069,16 +3058,16 @@ return NULL; } - cd = allocate_gcp_object(cd, ct, my_free); + cd = allocate_gcp_object(cd, ct, allocator->ca_free); Py_DECREF(res); } - if (dont_clear_after_alloc == NULL) + if (!allocator->ca_dont_clear) memset(cd->c_data, 0, datasize); return cd; } static PyObject *direct_newp(CTypeDescrObject *ct, PyObject *init, - cffi_allocator_t allocator) + const cffi_allocator_t *allocator) { CTypeDescrObject *ctitem; CDataObject *cd; @@ -3183,7 +3172,7 @@ PyObject *init = Py_None; if (!PyArg_ParseTuple(args, "O!|O:newp", &CTypeDescr_Type, &ct, &init)) return NULL; - return direct_newp(ct, init, default_allocator); + return direct_newp(ct, init, &default_allocator); } static int @@ -4659,7 +4648,9 @@ if (cif_descr != NULL) { /* exchange data size */ - cif_descr->exchange_size = exchange_offset; + /* we also align it to the next multiple of 8, in an attempt to + work around bugs(?) of libffi like #241 */ + cif_descr->exchange_size = ALIGN_ARG(exchange_offset); } return 0; } @@ -5101,15 +5092,9 @@ { save_errno(); { -#ifdef WITH_THREAD - PyGILState_STATE state = PyGILState_Ensure(); -#endif - + PyGILState_STATE state = gil_ensure(); general_invoke_callback(1, result, (char *)args, userdata); - -#ifdef WITH_THREAD - PyGILState_Release(state); -#endif + gil_release(state); } restore_errno(); } @@ -6515,7 +6500,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("1.3.1"); + v = PyText_FromString("1.4.2"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; @@ -6542,7 +6527,7 @@ INITERROR; } - init_errno(); + init_cffi_tls(); if (PyErr_Occurred()) INITERROR; diff --git a/c/call_python.c b/c/call_python.c --- a/c/call_python.c +++ b/c/call_python.c @@ -24,9 +24,6 @@ static PyObject *_ffi_def_extern_decorator(PyObject *outer_args, PyObject *fn) { -#if PY_MAJOR_VERSION >= 3 -# error review! 
-#endif char *s; PyObject *error, *onerror, *infotuple, *old1; int index, err; @@ -43,10 +40,10 @@ return NULL; if (s == NULL) { - PyObject *name = PyObject_GetAttrString(fn, "__name__"); + name = PyObject_GetAttrString(fn, "__name__"); if (name == NULL) return NULL; - s = PyString_AsString(name); + s = PyText_AsUTF8(name); if (s == NULL) { Py_DECREF(name); return NULL; @@ -203,9 +200,7 @@ err = 1; } else { -#ifdef WITH_THREAD - PyGILState_STATE state = PyGILState_Ensure(); -#endif + PyGILState_STATE state = gil_ensure(); if (externpy->reserved1 != PyThreadState_GET()->interp->modules) { /* Update the (reserved1, reserved2) cache. This will fail if we didn't call @ffi.def_extern() in this particular @@ -215,9 +210,7 @@ if (!err) { general_invoke_callback(0, args, args, externpy->reserved2); } -#ifdef WITH_THREAD - PyGILState_Release(state); -#endif + gil_release(state); } if (err) { static const char *msg[2] = { diff --git a/c/cffi1_module.c b/c/cffi1_module.c --- a/c/cffi1_module.c +++ b/c/cffi1_module.c @@ -22,7 +22,7 @@ static int init_ffi_lib(PyObject *m) { PyObject *x; - int i; + int i, res; static char init_done = 0; if (PyType_Ready(&FFI_Type) < 0) @@ -48,11 +48,13 @@ for (i = 0; all_dlopen_flags[i].name != NULL; i++) { x = PyInt_FromLong(all_dlopen_flags[i].value); - if (x == NULL || PyDict_SetItemString(FFI_Type.tp_dict, - all_dlopen_flags[i].name, - x) < 0) + if (x == NULL) return -1; + res = PyDict_SetItemString(FFI_Type.tp_dict, + all_dlopen_flags[i].name, x); Py_DECREF(x); + if (res < 0) + return -1; } init_done = 1; } diff --git a/c/commontypes.c b/c/commontypes.c --- a/c/commontypes.c +++ b/c/commontypes.c @@ -199,7 +199,8 @@ static PyObject *b__get_common_types(PyObject *self, PyObject *arg) { - int i, err; + int err; + size_t i; for (i = 0; i < num_common_simple_types; i++) { const char *s = common_simple_types[i]; PyObject *o = PyText_FromString(s + strlen(s) + 1); diff --git a/c/ffi_obj.c b/c/ffi_obj.c --- a/c/ffi_obj.c +++ b/c/ffi_obj.c @@ -335,7 +335,7 @@ "pointer to the memory somewhere else, e.g. into another structure."); static PyObject *_ffi_new(FFIObject *self, PyObject *args, PyObject *kwds, - cffi_allocator_t allocator) + const cffi_allocator_t *allocator) { CTypeDescrObject *ct; PyObject *arg, *init = Py_None; @@ -353,15 +353,22 @@ static PyObject *ffi_new(FFIObject *self, PyObject *args, PyObject *kwds) { - return _ffi_new(self, args, kwds, default_allocator); + return _ffi_new(self, args, kwds, &default_allocator); } static PyObject *_ffi_new_with_allocator(PyObject *allocator, PyObject *args, PyObject *kwds) { + cffi_allocator_t alloc1; + PyObject *my_alloc, *my_free; + my_alloc = PyTuple_GET_ITEM(allocator, 1); + my_free = PyTuple_GET_ITEM(allocator, 2); + alloc1.ca_alloc = (my_alloc == Py_None ? NULL : my_alloc); + alloc1.ca_free = (my_free == Py_None ? 
NULL : my_free); + alloc1.ca_dont_clear = (PyTuple_GET_ITEM(allocator, 3) == Py_False); + return _ffi_new((FFIObject *)PyTuple_GET_ITEM(allocator, 0), - args, kwds, - &PyTuple_GET_ITEM(allocator, 1)); + args, kwds, &alloc1); } PyDoc_STRVAR(ffi_new_allocator_doc, @@ -396,27 +403,14 @@ return NULL; } - allocator = PyTuple_New(4); + allocator = PyTuple_Pack(4, + (PyObject *)self, + my_alloc, + my_free, + PyBool_FromLong(should_clear_after_alloc)); if (allocator == NULL) return NULL; - Py_INCREF(self); - PyTuple_SET_ITEM(allocator, 0, (PyObject *)self); - - if (my_alloc != Py_None) { - Py_INCREF(my_alloc); - PyTuple_SET_ITEM(allocator, 1, my_alloc); - } - if (my_free != Py_None) { - Py_INCREF(my_free); - PyTuple_SET_ITEM(allocator, 2, my_free); - } - if (!should_clear_after_alloc) { - PyObject *my_true = Py_True; - Py_INCREF(my_true); - PyTuple_SET_ITEM(allocator, 3, my_true); /* dont_clear_after_alloc */ - } - { static PyMethodDef md = {"allocator", (PyCFunction)_ffi_new_with_allocator, @@ -896,7 +890,14 @@ #endif PyDoc_STRVAR(ffi_init_once_doc, - "XXX document me"); +"init_once(function, tag): run function() once. More precisely,\n" +"'function()' is called the first time we see a given 'tag'.\n" +"\n" +"The return value of function() is remembered and returned by the current\n" +"and all future init_once() with the same tag. If init_once() is called\n" +"from multiple threads in parallel, all calls block until the execution\n" +"of function() is done. If function() raises an exception, it is\n" +"propagated and nothing is cached."); #if PY_MAJOR_VERSION < 3 /* PyCapsule_New is redefined to be PyCObject_FromVoidPtr in _cffi_backend, diff --git a/c/lib_obj.c b/c/lib_obj.c --- a/c/lib_obj.c +++ b/c/lib_obj.c @@ -459,6 +459,7 @@ static PyObject *lib_getattr(LibObject *lib, PyObject *name) { + char *p; PyObject *x; LIB_GET_OR_CACHE_ADDR(x, lib, name, goto missing); @@ -469,16 +470,25 @@ return x; missing: - if (strcmp(PyText_AsUTF8(name), "__all__") == 0) { + p = PyText_AsUTF8(name); + if (p == NULL) + return NULL; + if (strcmp(p, "__all__") == 0) { PyErr_Clear(); return _lib_dir1(lib, 1); } - if (strcmp(PyText_AsUTF8(name), "__dict__") == 0) { + if (strcmp(p, "__dict__") == 0) { PyErr_Clear(); return _lib_dict(lib); } + if (strcmp(p, "__class__") == 0) { + PyErr_Clear(); + x = (PyObject *)Py_TYPE(lib); + Py_INCREF(x); + return x; + } /* this hack is for Python 3.5 */ - if (strcmp(PyText_AsUTF8(name), "__name__") == 0) { + if (strcmp(p, "__name__") == 0) { PyErr_Clear(); return lib_repr(lib); } diff --git a/c/misc_thread.h b/c/misc_thread.h deleted file mode 100644 --- a/c/misc_thread.h +++ /dev/null @@ -1,23 +0,0 @@ -#include - -/* This is only included if GCC doesn't support "__thread" global variables. - * See USE__THREAD in _ffi_backend.c. - */ - -static pthread_key_t cffi_tls_key; - -static void init_errno(void) -{ - (void) pthread_key_create(&cffi_tls_key, NULL); -} - -static void save_errno(void) -{ - intptr_t value = errno; - (void) pthread_setspecific(cffi_tls_key, (void *)value); -} - -static void restore_errno(void) { - intptr_t value = (intptr_t)pthread_getspecific(cffi_tls_key); - errno = value; -} diff --git a/c/misc_thread_posix.h b/c/misc_thread_posix.h new file mode 100644 --- /dev/null +++ b/c/misc_thread_posix.h @@ -0,0 +1,186 @@ +/* + Logic for a better replacement of PyGILState_Ensure(). + + This version is ready to handle the case of a non-Python-started + thread in which we do a large number of calls to CFFI callbacks. 
If + we were to rely on PyGILState_Ensure() for that, we would constantly + be creating and destroying PyThreadStates---it is slow, and + PyThreadState_Delete() will actually walk the list of all thread + states, making it O(n). :-( + + This version only creates one PyThreadState object the first time we + see a given thread, and keep it alive until the thread is really + shut down, using a destructor on the tls key. +*/ + +#ifdef WITH_THREAD +#include + + +static pthread_key_t cffi_tls_key; + +struct cffi_tls_s { + /* The locally-made thread state. This is only non-null in case + we build the thread state here. It remains null if this thread + had already a thread state provided by CPython. */ + PyThreadState *local_thread_state; + + /* The saved errno. If the C compiler supports '__thread', then + we use that instead; this value is not used at all in this case. */ + int saved_errno; +}; + +static void _tls_destructor(void *p) +{ + struct cffi_tls_s *tls = (struct cffi_tls_s *)p; + + if (tls->local_thread_state != NULL) { + /* We need to re-acquire the GIL temporarily to free the + thread state. I hope it is not a problem to do it in + a thread-local destructor. + */ + PyEval_RestoreThread(tls->local_thread_state); + PyThreadState_DeleteCurrent(); + } + free(tls); +} + +static void init_cffi_tls(void) +{ + if (pthread_key_create(&cffi_tls_key, _tls_destructor) != 0) + PyErr_SetString(PyExc_OSError, "pthread_key_create() failed"); +} + +static struct cffi_tls_s *_make_cffi_tls(void) +{ + void *p = calloc(1, sizeof(struct cffi_tls_s)); + if (p == NULL) + return NULL; + if (pthread_setspecific(cffi_tls_key, p) != 0) { + free(p); + return NULL; + } + return p; +} + +static struct cffi_tls_s *get_cffi_tls(void) +{ + void *p = pthread_getspecific(cffi_tls_key); + if (p == NULL) + p = _make_cffi_tls(); + return (struct cffi_tls_s *)p; +} + + +/* USE__THREAD is defined by setup.py if it finds that it is + syntactically valid to use "__thread" with this C compiler. */ +#ifdef USE__THREAD + +static __thread int cffi_saved_errno = 0; +static void save_errno(void) { cffi_saved_errno = errno; } +static void restore_errno(void) { errno = cffi_saved_errno; } + +#else + +static void save_errno(void) +{ + int saved = errno; + struct cffi_tls_s *tls = get_cffi_tls(); + if (tls != NULL) + tls->saved_errno = saved; +} + +static void restore_errno(void) +{ + struct cffi_tls_s *tls = get_cffi_tls(); + if (tls != NULL) + errno = tls->saved_errno; +} + +#endif + + +/* Seems that CPython 3.5.1 made our job harder. Did not find out how + to do that without these hacks. We can't use PyThreadState_GET(), + because that calls PyThreadState_Get() which fails an assert if the + result is NULL. */ +#if PY_MAJOR_VERSION >= 3 && !defined(_Py_atomic_load_relaxed) + /* this was abruptly un-defined in 3.5.1 */ +void *volatile _PyThreadState_Current; + /* XXX simple volatile access is assumed atomic */ +# define _Py_atomic_load_relaxed(pp) (*(pp)) +#endif + + +static PyThreadState *get_current_ts(void) +{ +#if PY_MAJOR_VERSION >= 3 + return (PyThreadState*)_Py_atomic_load_relaxed(&_PyThreadState_Current); +#else + return _PyThreadState_Current; +#endif +} + +static PyGILState_STATE gil_ensure(void) +{ + /* Called at the start of a callback. Replacement for + PyGILState_Ensure(). 
+ */ + PyGILState_STATE result; + struct cffi_tls_s *tls; + PyThreadState *ts = PyGILState_GetThisThreadState(); + + if (ts != NULL) { + ts->gilstate_counter++; + if (ts != get_current_ts()) { + /* common case: 'ts' is our non-current thread state and + we have to make it current and acquire the GIL */ + PyEval_RestoreThread(ts); + return PyGILState_UNLOCKED; + } + else { + return PyGILState_LOCKED; + } + } + else { + /* no thread state here so far. */ + result = PyGILState_Ensure(); + assert(result == PyGILState_UNLOCKED); + + ts = PyGILState_GetThisThreadState(); + assert(ts != NULL); + assert(ts == get_current_ts()); + assert(ts->gilstate_counter >= 1); + + /* Save the now-current thread state inside our 'local_thread_state' + field, to be removed at thread shutdown */ + tls = get_cffi_tls(); + if (tls != NULL) { + tls->local_thread_state = ts; + ts->gilstate_counter++; + } + + return result; + } +} + +static void gil_release(PyGILState_STATE oldstate) +{ + PyGILState_Release(oldstate); +} + + +#else /* !WITH_THREAD */ + +static int cffi_saved_errno = 0; +static void save_errno(void) { cffi_saved_errno = errno; } +static void restore_errno(void) { errno = cffi_saved_errno; } + +static PyGILState_STATE gil_ensure(void) { return -1; } +static void gil_release(PyGILState_STATE oldstate) { } + +#endif /* !WITH_THREAD */ + + +#define save_errno_only save_errno +#define restore_errno_only restore_errno diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -10,7 +10,7 @@ static DWORD cffi_tls_index = TLS_OUT_OF_INDEXES; -static void init_errno(void) +static void init_cffi_tls(void) { if (cffi_tls_index == TLS_OUT_OF_INDEXES) { cffi_tls_index = TlsAlloc(); @@ -182,6 +182,17 @@ } #endif + +#ifdef WITH_THREAD +/* XXX should port the code from misc_thread_posix.h */ +static PyGILState_STATE gil_ensure(void) { return PyGILState_Ensure(); } +static void gil_release(PyGILState_STATE oldst) { PyGILState_Release(oldst); } +#else +static PyGILState_STATE gil_ensure(void) { return -1; } +static void gil_release(PyGILState_STATE oldstate) { } +#endif + + /************************************************************/ /* Emulate dlopen()&co. from the Windows API */ diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -12,7 +12,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.3.1", ("This test_c.py file is for testing a version" +assert __version__ == "1.4.2", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.3.1" -__version_info__ = (1, 3, 1) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. 
It may be older than __version__ diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -73,6 +73,7 @@ self._included_ffis = [] self._windows_unicode = None self._init_once_cache = {} + self._cdef_version = None self._embedding_init_code = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) @@ -106,6 +107,7 @@ raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: + self._cdef_version = object() self._parser.parse(csource, override=override, packed=packed, dllexport=dllexport) self._cdefsources.append(csource) @@ -592,14 +594,15 @@ recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) - def compile(self, tmpdir='.'): + def compile(self, tmpdir='.', verbose=0): from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, - source_extension=source_extension, **kwds) + source_extension=source_extension, + compiler_verbose=verbose, **kwds) def init_once(self, func, tag): # Read _init_once_cache[tag], which is either (False, lock) if @@ -660,70 +663,70 @@ import os backend = ffi._backend backendlib = _load_backend_lib(backend, libname, flags) - copied_enums = [] # - def make_accessor_locked(name): + def accessor_function(name): key = 'function ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - try: - value = backendlib.load_function(BType, name) - except KeyError as e: - raise AttributeError('%s: %s' % (name, e)) - library.__dict__[name] = value + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + try: + value = backendlib.load_function(BType, name) + except KeyError as e: + raise AttributeError('%s: %s' % (name, e)) + library.__dict__[name] = value + # + def accessor_variable(name): + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + read_variable = backendlib.read_variable + write_variable = backendlib.write_variable + setattr(FFILibrary, name, property( + lambda self: read_variable(BType, name), + lambda self, value: write_variable(BType, name, value))) + # + def accessor_constant(name): + raise NotImplementedError("non-integer constant '%s' cannot be " + "accessed from a dlopen() library" % (name,)) + # + def accessor_int_constant(name): + library.__dict__[name] = ffi._parser._int_constants[name] + # + accessors = {} + accessors_version = [False] + # + def update_accessors(): + if accessors_version[0] is ffi._cdef_version: return # - key = 'variable ' + name - if key in ffi._parser._declarations: - tp, _ = ffi._parser._declarations[key] - BType = ffi._get_cached_btype(tp) - read_variable = backendlib.read_variable - write_variable = backendlib.write_variable - setattr(FFILibrary, name, property( - lambda self: read_variable(BType, name), - lambda self, value: write_variable(BType, name, value))) - return - # - if not copied_enums: - from . 
import model - error = None - for key, (tp, _) in ffi._parser._declarations.items(): - if not isinstance(tp, model.EnumType): - continue - try: - tp.check_not_partial() - except Exception as e: - error = e - continue - for enumname, enumval in zip(tp.enumerators, tp.enumvalues): - if enumname not in library.__dict__: - library.__dict__[enumname] = enumval - if error is not None: - if name in library.__dict__: - return # ignore error, about a different enum - raise error - - for key, val in ffi._parser._int_constants.items(): - if key not in library.__dict__: - library.__dict__[key] = val - - copied_enums.append(True) - if name in library.__dict__: - return - # - key = 'constant ' + name - if key in ffi._parser._declarations: - raise NotImplementedError("fetching a non-integer constant " - "after dlopen()") - # - raise AttributeError(name) + from . import model + for key, (tp, _) in ffi._parser._declarations.items(): + if not isinstance(tp, model.EnumType): + tag, name = key.split(' ', 1) + if tag == 'function': + accessors[name] = accessor_function + elif tag == 'variable': + accessors[name] = accessor_variable + elif tag == 'constant': + accessors[name] = accessor_constant + else: + for i, enumname in enumerate(tp.enumerators): + def accessor_enum(name, tp=tp, i=i): + tp.check_not_partial() + library.__dict__[name] = tp.enumvalues[i] + accessors[enumname] = accessor_enum + for name in ffi._parser._int_constants: + accessors.setdefault(name, accessor_int_constant) + accessors_version[0] = ffi._cdef_version # def make_accessor(name): with ffi._lock: if name in library.__dict__ or name in FFILibrary.__dict__: return # added by another thread while waiting for the lock - make_accessor_locked(name) + if name not in accessors: + update_accessors() + if name not in accessors: + raise AttributeError(name) + accessors[name](name) # class FFILibrary(object): def __getattr__(self, name): @@ -737,6 +740,10 @@ setattr(self, name, value) else: property.__set__(self, value) + def __dir__(self): + with ffi._lock: + update_accessors() + return accessors.keys() # if libname is not None: try: diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -17,15 +17,16 @@ def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] - allsources.extend(sources) + for src in sources: + allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) -def compile(tmpdir, ext): +def compile(tmpdir, ext, compiler_verbose=0): """Compile a C extension module using distutils.""" saved_environ = os.environ.copy() try: - outputfilename = _build(tmpdir, ext) + outputfilename = _build(tmpdir, ext, compiler_verbose) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can @@ -35,10 +36,10 @@ os.environ[key] = value return outputfilename -def _build(tmpdir, ext): +def _build(tmpdir, ext, compiler_verbose=0): # XXX compact but horrible :-( from distutils.core import Distribution - import distutils.errors + import distutils.errors, distutils.log # dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() @@ -48,7 +49,12 @@ options['build_temp'] = ('ffiplatform', tmpdir) # try: - dist.run_command('build_ext') + old_level = distutils.log.set_threshold(0) or 0 + try: + distutils.log.set_verbosity(compiler_verbose) + dist.run_command('build_ext') + finally: + distutils.log.set_threshold(old_level) except 
(distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1354,7 +1354,8 @@ return os.path.join(outputdir, *parts), parts def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, - c_file=None, source_extension='.c', extradir=None, **kwds): + c_file=None, source_extension='.c', extradir=None, + compiler_verbose=1, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: @@ -1374,7 +1375,7 @@ cwd = os.getcwd() try: os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext) + outputfilename = ffiplatform.compile('.', ext, compiler_verbose) finally: os.chdir(cwd) return outputfilename diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst --- a/doc/source/cdef.rst +++ b/doc/source/cdef.rst @@ -62,7 +62,7 @@ ffi.cdef("C-like declarations with '...'") if __name__ == "__main__": - ffi.compile() + ffi.compile(verbose=True) Running ``python foo_build.py`` produces a file ``_foo.c`` and invokes the C compiler to turn it into a file ``_foo.so`` (or @@ -134,7 +134,8 @@ Similarly, the ``lib`` objects returned by the C version are read-only, apart from writes to global variables. Also, ``lib.__dict__`` does not work before version 1.2 or if ``lib`` happens to declare a name -called ``__dict__`` (use instead ``dir(lib)``). +called ``__dict__`` (use instead ``dir(lib)``). The same is true +for ``lib.__class__`` before version 1.4. ffi.cdef(): declaring types and functions @@ -504,7 +505,8 @@ the mtime to be updated anyway, delete the file before calling the functions. -**ffi.compile(tmpdir='.'):** explicitly generate the .py or .c file, +**ffi.compile(tmpdir='.', verbose=False):** +explicitly generate the .py or .c file, and (if .c) compile it. The output file is (or are) put in the directory given by ``tmpdir``. In the examples given here, we use ``if __name__ == "__main__": ffi.compile()`` in the build scripts---if @@ -513,6 +515,11 @@ to ``set_source()``, then a corresponding subdirectory of the ``tmpdir`` is used.) +*New in version 1.4:* ``verbose`` argument. If True, it prints the +usual distutils output, including the command lines that call the +compiler. (This parameter might be changed to True by default in a +future release.) + **ffi.emit_python_code(filename):** generate the given .py file (same as ``ffi.compile()`` for ABI mode, with an explicitly-named file to write). If you choose, you can include this .py file pre-packaged in diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -45,9 +45,9 @@ # built documents. # # The short X.Y version. -version = '1.3' +version = '1.4' # The full version, including alpha/beta/rc tags. -release = '1.3.1' +release = '1.4.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -51,11 +51,11 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-1.3.1.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-1.4.2.tar.gz - - MD5: ... + - MD5: 81357fe5042d00650b85b728cc181df2 - - SHA: ... 
+ - SHA: 76cff6f1ff5bfb2b9c6c8e2cfa8bf90b5c944394 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -433,11 +433,11 @@ out-of-line API mode. The next section about Callbacks_ describes the ABI-mode solution. -This is *new in version 1.4.* Use Callbacks_ if backward compatibility -is an issue. (The original callbacks are slower to invoke and have -the same issue as libffi's callbacks; notably, see the warning__. -The new style described in the present section does not use libffi's -callbacks at all.) +This is *new in version 1.4.* Use old-style Callbacks_ if backward +compatibility is an issue. (The original callbacks are slower to +invoke and have the same issue as libffi's callbacks; notably, see the +warning__. The new style described in the present section does not +use libffi's callbacks at all.) .. __: Callbacks_ @@ -459,33 +459,35 @@ from _my_example import ffi, lib @ffi.def_extern() - def my_callback(fooptr, value): + def my_callback(x, y): return 42 -You can get a ```` pointer-to-function object from +You obtain a ```` pointer-to-function object by getting ``lib.my_callback``. This ```` can be passed to C code and then works like a callback: when the C code calls this function pointer, the Python function ``my_callback`` is called. (You need to pass ``lib.my_callback`` to C code, and not ``my_callback``: the -latter is just a plain Python function that cannot be passed to C.) +latter is just the Python function above, which cannot be passed to C.) CFFI implements this by defining ``my_callback`` as a static C function, written after the ``set_source()`` code. The ```` then points to this function. What this function does is invoke the -Python function object that was dynamically attached by +Python function object that is, at runtime, attached with ``@ffi.def_extern()``. -Each function from the cdef with ``extern "Python"`` turns into only -one C function. To support some corner cases, it is possible to -redefine the attached Python function by calling ``@ffi.def_extern()`` -again---but this is not recommended! Better write the Python function -more flexibly in the first place. Calling ``@ffi.def_extern()`` again -changes the C logic to call the new Python function; the old Python -function is not callable any more and the C function pointer you get -from ``lib.my_function`` is always the same. +The ``@ffi.def_extern()`` decorator should be applied to a global +function, once. This is because each function from the cdef with +``extern "Python"`` turns into only one C function. To support some +corner cases, it is possible to redefine the attached Python function +by calling ``@ffi.def_extern()`` again---but this is not recommended! +Better write the single global Python function more flexibly in the +first place. Calling ``@ffi.def_extern()`` again changes the C logic +to call the new Python function; the old Python function is not +callable any more and the C function pointer you get from +``lib.my_function`` is always the same. -Extern "Python" and "void *" arguments -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Extern "Python" and ``void *`` arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As described just before, you cannot use ``extern "Python"`` to make a variable number of C function pointers. 
However, achieving that @@ -567,11 +569,13 @@ In case you want to access some ``extern "Python"`` function directly from the C code written in ``set_source()``, you need to write a forward static declaration. The real implementation of this function -is added by CFFI *after* the C code---this is needed because even the +is added by CFFI *after* the C code---this is needed because the declaration might use types defined by ``set_source()`` (e.g. ``event_t`` above, from the ``#include``), so it cannot be generated before. +:: + ffi.set_source("_demo_cffi", """ #include @@ -589,11 +593,12 @@ int my_algo(int); """) ffi.set_source("_example_cffi", """ - static int f(int); + static int f(int); /* the forward declaration */ + static int my_algo(int n) { int i, sum = 0; for (i = 0; i < n; i++) - sum += f(i); + sum += f(i); /* call f() here */ return sum; } """) @@ -705,7 +710,28 @@ keep this object alive for as long as the callback may be invoked. The easiest way to do that is to always use ``@ffi.callback()`` at module-level only, and to pass "context" information around with -`ffi.new_handle()`_, if possible. +`ffi.new_handle()`_, if possible. Example: + +.. code-block:: python + + # a good way to use this decorator is once at global level + @ffi.callback("int(int, void *)") + def my_global_callback(x, handle): + return ffi.from_handle(handle).some_method(x) + + + class Foo(object): + + def __init__(self): + handle = ffi.new_handle(self) + self._handle = handle # must be kept alive + lib.register_stuff_with_callback_and_voidp_arg(my_global_callback, handle) + + def some_method(self, x): + ... + +(See also the section about `extern "Python"`_ above, where the same +general style is used.) Note that callbacks of a variadic function type are not supported. A workaround is to add custom C code. In the following example, a @@ -797,7 +823,7 @@ hurt) to say ``WINAPI`` or ``__stdcall`` when declaring a plain function in the ``cdef()``. (The difference can still be seen if you take explicitly a pointer to this function with ``ffi.addressof()``, -or if the function is ``CFFI_CALL_PYTHON``.) +or if the function is ``extern "Python"``.) These calling convention specifiers are accepted but ignored on any platform other than 32-bit Windows. @@ -1139,6 +1165,58 @@ returned by ``alloc()`` is assumed to be already cleared (or you are fine with garbage); otherwise CFFI will clear it. +.. _initonce: + +**ffi.init_once(function, tag)**: run ``function()`` once. The +``tag`` should be a primitive object, like a string, that identifies +the function: ``function()`` is only called the first time we see the +``tag``. The return value of ``function()`` is remembered and +returned by the current and all future ``init_once()`` with the same +tag. If ``init_once()`` is called from multiple threads in parallel, +all calls block until the execution of ``function()`` is done. If +``function()`` raises an exception, it is propagated and nothing is +cached (i.e. ``function()`` will be called again, in case we catch the +exception and try ``init_once()`` again). *New in version 1.4.* + +Example:: + + from _xyz_cffi import ffi, lib + + def initlib(): + lib.init_my_library() + + def make_new_foo(): + ffi.init_once(initlib, "init") + return lib.make_foo() + +``init_once()`` is optimized to run very quickly if ``function()`` has +already been called. (On PyPy, the cost is zero---the JIT usually +removes everything in the machine code it produces.) 
+ +*Note:* one motivation__ for ``init_once()`` is the CPython notion of +"subinterpreters" in the embedded case. If you are using the +out-of-line API mode, ``function()`` is called only once even in the +presence of multiple subinterpreters, and its return value is shared +among all subinterpreters. The goal is to mimic the way traditional +CPython C extension modules have their init code executed only once in +total even if there are subinterpreters. In the example above, the C +function ``init_my_library()`` is called once in total, not once per +subinterpreter. For this reason, avoid Python-level side-effects in +``function()`` (as they will only be applied in the first +subinterpreter to run); instead, return a value, as in the following +example:: + + def init_get_max(): + return lib.initialize_once_and_get_some_maximum_number() + + def process(i): + if i > ffi.init_once(init_get_max, "max"): + raise IndexError("index too large!") + ... + +.. __: https://bitbucket.org/cffi/cffi/issues/233/ + + .. _`Preparing and Distributing modules`: cdef.html#loading-libraries @@ -1233,8 +1311,8 @@ function with a ``char *`` argument to which you pass a Python string will not actually modify the array of characters passed in, and so passes directly a pointer inside the Python string object. - (PyPy might in the future do the same, but it is harder because a - string object can move in memory when the GC runs.) + (PyPy might in the future do the same, but it is harder because + strings are not naturally zero-terminated in PyPy.) `(**)` C function calls are done with the GIL released. diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,18 +3,61 @@ ====================== +v1.4.2 +====== + +Nothing changed from v1.4.1. + + +v1.4.1 +====== + +* Fix the compilation failure of cffi on CPython 3.5.0. (3.5.1 works; + some detail changed that makes some underscore-starting macros + disappear from view of extension modules, and I worked around it, + thinking it changed in all 3.5 versions---but no: it was only in + 3.5.1.) + + v1.4.0 ====== +* A `better way to do callbacks`__ has been added (faster and more + portable, and usually cleaner). It is a mechanism for the + out-of-line API mode that replaces the dynamic creation of callback + objects (i.e. C functions that invoke Python) with the static + declaration in ``cdef()`` of which callbacks are needed. This is + more C-like, in that you have to structure your code around the idea + that you get a fixed number of function pointers, instead of + creating them on-the-fly. + +* ``ffi.compile()`` now takes an optional ``verbose`` argument. When + ``True``, distutils prints the calls to the compiler. + +* ``ffi.compile()`` used to fail if given ``sources`` with a path that + includes ``".."``. Fixed. + +* ``ffi.init_once()`` added. See docs__. + +* ``dir(lib)`` now works on libs returned by ``ffi.dlopen()`` too. + +* Cleaned up and modernized the content of the ``demo`` subdirectory + in the sources (thanks matti!). + * ``ffi.new_handle()`` is now guaranteed to return unique ``void *`` values, even if called twice on the same object. Previously, in - that case, CPython (but not PyPy) would return two ``cdata`` objects - with the same ``void *`` value. This change is useful to add and - remove handles from a global dict or set without worrying about - duplicates. + that case, CPython would return two ``cdata`` objects with the same + ``void *`` value. 
This change is useful to add and remove handles + from a global dict (or set) without worrying about duplicates. + It already used to work like that on PyPy. + *This change can break code that used to work on CPython by relying + on the object to be kept alive by other means than keeping the + result of ffi.new_handle() alive.* (The corresponding `warning in + the docs`__ of ``ffi.new_handle()`` has been here since v0.8!) -* ``ffi.init_once()`` XXX - https://bitbucket.org/cffi/cffi/issues/233/ +.. __: using.html#extern-python +.. __: using.html#initonce +.. __: using.html#ffi-new-handle v1.3.1 diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -144,7 +144,7 @@ `Mailing list `_ """, - version='1.3.1', + version='1.4.2', packages=['cffi'] if cpython else [], package_data={'cffi': ['_cffi_include.h', 'parse_c_type.h']} if cpython else {}, diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -1352,8 +1352,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("enum foo;") from cffi import __version_info__ - if __version_info__ < (1, 4): - py.test.skip("re-enable me in version 1.4") + if __version_info__ < (1, 5): + py.test.skip("re-enable me in version 1.5") e = py.test.raises(CDefError, ffi.cast, "enum foo", -1) assert str(e.value) == ( "'enum foo' has no values explicitly defined: refusing to guess " @@ -1826,7 +1826,12 @@ assert seen == [1, 1] def test_init_once_multithread(self): - import thread, time + import sys, time + if sys.version_info < (3,): + import thread + else: + import _thread as thread + # def do_init(): seen.append('init!') time.sleep(1) diff --git a/testing/cffi0/test_function.py b/testing/cffi0/test_function.py --- a/testing/cffi0/test_function.py +++ b/testing/cffi0/test_function.py @@ -464,10 +464,22 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("double __stdcall sin(double x);") # stdcall ignored m = ffi.dlopen(lib_m) - if (sys.platform == 'win32' and sys.maxsize < 2**32 and + if (sys.platform == 'win32' and sys.maxsize < 2**32 and self.Backend is not CTypesBackend): assert "double(__stdcall *)(double)" in str(ffi.typeof(m.sin)) else: assert "double(*)(double)" in str(ffi.typeof(m.sin)) x = m.sin(1.23) assert x == math.sin(1.23) + + def test_dir_on_dlopen_lib(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + typedef enum { MYE1, MYE2 } myenum_t; + double myfunc(double); + double myvar; + const double myconst; + #define MYFOO 42 + """) + m = ffi.dlopen(lib_m) + assert dir(m) == ['MYE1', 'MYE2', 'MYFOO', 'myconst', 'myfunc', 'myvar'] diff --git a/testing/cffi1/test_ffi_obj.py b/testing/cffi1/test_ffi_obj.py --- a/testing/cffi1/test_ffi_obj.py +++ b/testing/cffi1/test_ffi_obj.py @@ -7,6 +7,7 @@ p = ffi.new("int *") p[0] = -42 assert p[0] == -42 + assert type(ffi) is ffi.__class__ is _cffi1_backend.FFI def test_ffi_subclass(): class FOO(_cffi1_backend.FFI): @@ -16,6 +17,7 @@ assert foo.x == 42 p = foo.new("int *") assert p[0] == 0 + assert type(foo) is foo.__class__ is FOO def test_ffi_no_argument(): py.test.raises(TypeError, _cffi1_backend.FFI, 42) @@ -437,13 +439,18 @@ assert seen == [1, 1] def test_init_once_multithread(): - import thread, time + if sys.version_info < (3,): + import thread + else: + import _thread as thread + import time + # def do_init(): - print 'init!' 
+ print('init!') seen.append('init!') time.sleep(1) seen.append('init done') - print 'init done' + print('init done') return 7 ffi = _cffi1_backend.FFI() seen = [] @@ -454,3 +461,37 @@ thread.start_new_thread(f, ()) time.sleep(1.5) assert seen == ['init!', 'init done'] + 6 * [7] + +def test_init_once_failure(): + def do_init(): + seen.append(1) + raise ValueError + ffi = _cffi1_backend.FFI() + seen = [] + for i in range(5): + py.test.raises(ValueError, ffi.init_once, do_init, "tag") + assert seen == [1] * (i + 1) + +def test_init_once_multithread_failure(): + if sys.version_info < (3,): + import thread + else: + import _thread as thread + import time + def do_init(): + seen.append('init!') + time.sleep(1) + seen.append('oops') + raise ValueError + ffi = _cffi1_backend.FFI() + seen = [] + for i in range(3): + def f(): + py.test.raises(ValueError, ffi.init_once, do_init, "tag") + thread.start_new_thread(f, ()) + i = 0 + while len(seen) < 6: + i += 1 + assert i < 20 + time.sleep(0.51) + assert seen == ['init!', 'oops'] * 3 diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -3,7 +3,7 @@ from cffi import FFI, VerificationError, FFIError from cffi import recompiler from testing.udir import udir -from testing.support import u +from testing.support import u, long from testing.support import FdWriteCapture, StdErrCapture @@ -948,6 +948,19 @@ """, sources=[str(extra_c_source)]) assert lib.external_foo == 42 +def test_dotdot_in_source_file_names(): + extra_c_source = udir.join( + 'extra_test_dotdot_in_source_file_names.c') + extra_c_source.write('const int external_foo = 42;\n') + ffi = FFI() + ffi.cdef("const int external_foo;") + lib = verify(ffi, 'test_dotdot_in_source_file_names', """ + extern const int external_foo; + """, sources=[os.path.join(os.path.dirname(str(extra_c_source)), + 'foobar', '..', + os.path.basename(str(extra_c_source)))]) + assert lib.external_foo == 42 + def test_call_with_incomplete_structs(): ffi = FFI() ffi.cdef("typedef struct {...;} foo_t; " @@ -1143,6 +1156,7 @@ assert hasattr(lib, '__dict__') assert lib.__all__ == ['MYFOO', 'mybar'] # but not 'myvar' assert lib.__name__ == repr(lib) + assert lib.__class__ is type(lib) def test_macro_var_callback(): ffi = FFI() @@ -1502,8 +1516,8 @@ res = lib.bar(4, 5) assert res == 0 assert f.getvalue() == ( - "extern \"Python\": function bar() called, but no code was attached " - "to it yet with @ffi.def_extern(). Returning 0.\n") + b"extern \"Python\": function bar() called, but no code was attached " + b"to it yet with @ffi.def_extern(). 
Returning 0.\n") @ffi.def_extern("bar") def my_bar(x, y): @@ -1520,10 +1534,10 @@ baz1 = ffi.def_extern()(baz) assert baz1 is baz seen = [] - baz(40L, 4L) - res = lib.baz(50L, 8L) + baz(long(40), long(4)) + res = lib.baz(long(50), long(8)) assert res is None - assert seen == [("Baz", 40L, 4L), ("Baz", 50, 8)] + assert seen == [("Baz", 40, 4), ("Baz", 50, 8)] assert type(seen[0][1]) is type(seen[0][2]) is long assert type(seen[1][1]) is type(seen[1][2]) is int diff --git a/testing/support.py b/testing/support.py --- a/testing/support.py +++ b/testing/support.py @@ -8,6 +8,7 @@ return eval('u'+repr(other).replace(r'\\u', r'\u') .replace(r'\\U', r'\U')) u = U() + long = long # for further "from testing.support import long" assert u+'a\x00b' == eval(r"u'a\x00b'") assert u+'a\u1234b' == eval(r"u'a\u1234b'") assert u+'a\U00012345b' == eval(r"u'a\U00012345b'") @@ -22,9 +23,12 @@ class StdErrCapture(object): """Capture writes to sys.stderr (not to the underlying file descriptor).""" def __enter__(self): - import StringIO + try: + from StringIO import StringIO + except ImportError: + from io import StringIO self.old_stderr = sys.stderr - sys.stderr = f = StringIO.StringIO() + sys.stderr = f = StringIO() return f def __exit__(self, *args): sys.stderr = self.old_stderr @@ -35,6 +39,9 @@ to the Posix manual.""" def __init__(self, capture_fd=2): # stderr by default + if sys.platform == 'win32': + import py + py.test.skip("seems not to work, too bad") self.capture_fd = capture_fd def __enter__(self): From pypy.commits at gmail.com Thu Dec 31 05:43:43 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Dec 2015 02:43:43 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: tweaks Message-ID: <568506df.6351c20a.2321.fffff9f3@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2500:eb5b30231847 Date: 2015-12-30 15:46 +0100 http://bitbucket.org/cffi/cffi/changeset/eb5b30231847/ Log: tweaks diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -4,7 +4,7 @@ #if defined(_MSC_VER) # define CFFI_DLLEXPORT __declspec(dllexport) #elif defined(__GNUC__) -# define CFFI_DLLEXPORT __attribute__ ((visibility("default"))) +# define CFFI_DLLEXPORT __attribute__((visibility("default"))) #else # define CFFI_DLLEXPORT /* nothing */ #endif @@ -33,12 +33,19 @@ #ifndef _MSC_VER /* --- Assuming a GCC not infinitely old --- */ -# define compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n) -# define write_barrier() __sync_synchronize() +# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n) +# define cffi_write_barrier() __sync_synchronize() +# if !defined(__amd64__) && !defined(__x86_64__) && \ + !defined(__i386__) && !defined(__i386) +# define cffi_read_barrier() __sync_synchronize() +# else +# define cffi_read_barrier() (void)0 +# endif #else /* --- Windows threads version --- */ -# define compare_and_swap(l,o,n) InterlockedCompareExchangePointer(l,n,o) -# define write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0) +# define cffi_compare_and_swap(l,o,n) InterlockedCompareExchangePointer(l,n,o) +# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0) +# define cffi_read_barrier() (void)0 static volatile LONG _cffi_dummy; #endif @@ -56,7 +63,7 @@ { static volatile void *lock = NULL; - while (!compare_and_swap(&lock, NULL, (void *)1)) { + while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) { /* should ideally do a spin loop instruction here, but hard to do it portably and doesn't 
really matter I think: PyEval_InitThreads() should be very fast, and @@ -77,7 +84,7 @@ } #endif - while (!compare_and_swap(&lock, (void *)1, NULL)) + while (!cffi_compare_and_swap(&lock, (void *)1, NULL)) ; #ifndef _MSC_VER @@ -237,7 +244,7 @@ old_value = *lock; if (old_value[0] == 'E') { assert(old_value[1] == 'N'); - if (compare_and_swap(lock, old_value, old_value + 1)) + if (cffi_compare_and_swap(lock, old_value, old_value + 1)) break; } else { @@ -253,11 +260,11 @@ PyEval_InitThreads(); /* makes the GIL */ PyEval_ReleaseLock(); /* then release it */ } - /* else: we already have the GIL, but we still needed to do the + /* else: there is already a GIL, but we still needed to do the spinlock dance to make sure that we see it as fully ready */ /* release the lock */ - while (!compare_and_swap(lock, old_value + 1, old_value)) + while (!cffi_compare_and_swap(lock, old_value + 1, old_value)) ; #endif } @@ -364,7 +371,7 @@ after that read barrier, we see everything done here before the write barrier. */ - write_barrier(); + cffi_write_barrier(); assert(_cffi_call_python_org != NULL); _cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org; @@ -406,9 +413,6 @@ fnptr(externpy, args); } -#undef compare_and_swap -#undef write_barrier - /* The cffi_start_python() function makes sure Python is initialized and our cffi module is set up. It can be called manually from the @@ -422,5 +426,10 @@ if (_cffi_start_python() == NULL) return -1; } + cffi_read_barrier(); return 0; } + +#undef cffi_compare_and_swap +#undef cffi_write_barrier +#undef cffi_read_barrier From pypy.commits at gmail.com Thu Dec 31 05:44:04 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Dec 2015 02:44:04 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: continue the cffi-static-callback-embedding branch, merge from default Message-ID: <568506f4.443f1c0a.8f26.ffffc879@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81507:81e85ede3e5a Date: 2015-12-30 15:52 +0100 http://bitbucket.org/pypy/pypy/changeset/81e85ede3e5a/ Log: continue the cffi-static-callback-embedding branch, merge from default diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -10,8 +10,6 @@ from rpython.config.config import ConflictConfigError from pypy.tool.option import make_objspace from pypy.conftest import pypydir -from rpython.rlib import rthread -from pypy.module.thread import os_thread thisdir = py.path.local(__file__).dirpath() @@ -78,108 +76,8 @@ return 1 return exitcode - # register the minimal equivalent of running a small piece of code. 
This - # should be used as sparsely as possible, just to register callbacks - - from rpython.rlib.entrypoint import entrypoint_highlevel - from rpython.rtyper.lltypesystem import rffi, lltype - - w_pathsetter = space.appexec([], """(): - def f(path): - import sys - sys.path[:] = path - return f - """) - - @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], - c_name='pypy_setup_home') - def pypy_setup_home(ll_home, verbose): - from pypy.module.sys.initpath import pypy_find_stdlib - verbose = rffi.cast(lltype.Signed, verbose) - if ll_home: - home1 = rffi.charp2str(ll_home) - home = os.path.join(home1, 'x') # <- so that 'll_home' can be - # directly the root directory - else: - home = home1 = pypydir - w_path = pypy_find_stdlib(space, home) - if space.is_none(w_path): - if verbose: - debug("pypy_setup_home: directories 'lib-python' and 'lib_pypy'" - " not found in '%s' or in any parent directory" % home1) - return rffi.cast(rffi.INT, 1) - space.startup() - space.call_function(w_pathsetter, w_path) - # import site - try: - space.setattr(space.getbuiltinmodule('sys'), - space.wrap('executable'), - space.wrap(home)) - import_ = space.getattr(space.getbuiltinmodule('__builtin__'), - space.wrap('__import__')) - space.call_function(import_, space.wrap('site')) - return rffi.cast(rffi.INT, 0) - except OperationError, e: - if verbose: - debug("OperationError:") - debug(" operror-type: " + e.w_type.getname(space)) - debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return rffi.cast(rffi.INT, -1) - - @entrypoint_highlevel('main', [rffi.CCHARP], c_name='pypy_execute_source') - def pypy_execute_source(ll_source): - return pypy_execute_source_ptr(ll_source, 0) - - @entrypoint_highlevel('main', [rffi.CCHARP, lltype.Signed], - c_name='pypy_execute_source_ptr') - def pypy_execute_source_ptr(ll_source, ll_ptr): - source = rffi.charp2str(ll_source) - res = _pypy_execute_source(source, ll_ptr) - return rffi.cast(rffi.INT, res) - - @entrypoint_highlevel('main', [], c_name='pypy_init_threads') - def pypy_init_threads(): - if not space.config.objspace.usemodules.thread: - return - os_thread.setup_threads(space) - - @entrypoint_highlevel('main', [], c_name='pypy_thread_attach') - def pypy_thread_attach(): - if not space.config.objspace.usemodules.thread: - return - os_thread.setup_threads(space) - os_thread.bootstrapper.acquire(space, None, None) - rthread.gc_thread_start() - os_thread.bootstrapper.nbthreads += 1 - os_thread.bootstrapper.release() - - def _pypy_execute_source(source, c_argument): - try: - w_globals = space.newdict(module=True) - space.setitem(w_globals, space.wrap('__builtins__'), - space.builtin_modules['__builtin__']) - space.setitem(w_globals, space.wrap('c_argument'), - space.wrap(c_argument)) - space.appexec([space.wrap(source), w_globals], """(src, glob): - import sys - stmt = compile(src, 'c callback', 'exec') - if not hasattr(sys, '_pypy_execute_source'): - sys._pypy_execute_source = [] - sys._pypy_execute_source.append(glob) - exec stmt in glob - """) - except OperationError, e: - debug("OperationError:") - debug(" operror-type: " + e.w_type.getname(space)) - debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return -1 - return 0 - - return entry_point, {'pypy_execute_source': pypy_execute_source, - 'pypy_execute_source_ptr': pypy_execute_source_ptr, - 'pypy_init_threads': pypy_init_threads, - 'pypy_thread_attach': pypy_thread_attach, - 'pypy_setup_home': pypy_setup_home} + from pypy.interpreter import embedding + return entry_point, 
embedding.capture(space, debug) # _____ Define and setup target ___ diff --git a/pypy/interpreter/embedding.py b/pypy/interpreter/embedding.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/embedding.py @@ -0,0 +1,173 @@ +from rpython.rtyper.lltypesystem import rffi, lltype + + +def capture(space, debug): + from rpython.rlib.entrypoint import entrypoint_highlevel + from rpython.rlib import rthread + from pypy.module.thread import os_thread + from pypy.conftest import pypydir + from pypy.module.sys.initpath import pypy_find_stdlib + + @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], + c_name='pypy_setup_home') + def pypy_setup_home(ll_home, verbose): + _declare_c_function() + verbose = rffi.cast(lltype.Signed, verbose) + if ll_home: + home1 = rffi.charp2str(ll_home) + home = os.path.join(home1, 'x') # <- so that 'll_home' can be + # directly the root directory + else: + home = home1 = pypydir + w_path = pypy_find_stdlib(space, home) + if space.is_none(w_path): + if verbose: + debug("pypy_setup_home: directories 'lib-python' and 'lib_pypy'" + " not found in '%s' or in any parent directory" % home1) + return rffi.cast(rffi.INT, 1) + space.startup() + space.appexec([w_path], """(path): + import sys + sys.path[:] = path + """) + # import site + try: + space.setattr(space.getbuiltinmodule('sys'), + space.wrap('executable'), + space.wrap(home)) + import_ = space.getattr(space.getbuiltinmodule('__builtin__'), + space.wrap('__import__')) + space.call_function(import_, space.wrap('site')) + return rffi.cast(rffi.INT, 0) + except OperationError, e: + if verbose: + debug("OperationError:") + debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) + return rffi.cast(rffi.INT, -1) + + @entrypoint_highlevel('main', [rffi.CCHARP], c_name='pypy_execute_source') + def pypy_execute_source(ll_source): + return pypy_execute_source_ptr(ll_source, 0) + + @entrypoint_highlevel('main', [rffi.CCHARP, lltype.Signed], + c_name='pypy_execute_source_ptr') + def pypy_execute_source_ptr(ll_source, ll_ptr): + source = rffi.charp2str(ll_source) + res = _pypy_execute_source(source, ll_ptr) + return rffi.cast(rffi.INT, res) + + @entrypoint_highlevel('main', [], c_name='pypy_init_threads') + def pypy_init_threads(): + if not space.config.objspace.usemodules.thread: + return + os_thread.setup_threads(space) + + @entrypoint_highlevel('main', [], c_name='pypy_thread_attach') + def pypy_thread_attach(): + if not space.config.objspace.usemodules.thread: + return + # XXX this doesn't really work. 
Don't use os.fork(), and + # if your embedder program uses fork(), don't use any PyPy + # code in the fork + rthread.gc_thread_start() + os_thread.bootstrapper.nbthreads += 1 + + def _pypy_execute_source(source, c_argument): + try: + w_globals = space.newdict(module=True) + space.setitem(w_globals, space.wrap('__builtins__'), + space.builtin_modules['__builtin__']) + space.setitem(w_globals, space.wrap('c_argument'), + space.wrap(c_argument)) + space.appexec([space.wrap(source), w_globals], """(src, glob): + import sys + stmt = compile(src, 'c callback', 'exec') + if not hasattr(sys, '_pypy_execute_source'): + sys._pypy_execute_source = [] + sys._pypy_execute_source.append(glob) + exec stmt in glob + """) + except OperationError, e: + debug("OperationError:") + debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) + return -1 + return 0 + + entrypoints_dict = {'pypy_execute_source': pypy_execute_source, + 'pypy_execute_source_ptr': pypy_execute_source_ptr, + 'pypy_init_threads': pypy_init_threads, + 'pypy_thread_attach': pypy_thread_attach, + 'pypy_setup_home': pypy_setup_home} + + return entrypoints_dict + + +_declare_c_function = rffi.llexternal_use_eci(separate_module_sources=[ +""" +#define PYPY_INIT_NO_THREADS 0x01 +#define PYPY_INIT_QUIET 0x02 + +static char _pypy_init_ok = 0; +static void _pypy_init_once_quiet(void); +static void _pypy_init_once_verbose(void); + + +#ifndef _MSC_VER /* --- Posix version --- */ + +static char *guess_home(void) +{ + Dl_info info; + if (dladdr(&guess_home, &info) == 0) + return NULL; + return realpath(info.dli_fname, NULL); +} + +RPY_EXPORTED +int pypy_initialize(int flags) +{ + static pthread_once_t once_control_1 = PTHREAD_ONCE_INIT; + static pthread_once_t once_control_2 = PTHREAD_ONCE_INIT; + + pthread_once(&once_control_1, + (flags & PYPY_INIT_QUIET) ? _pypy_init_once_quiet + : _pypy_init_once_verbose); + + if (_pypy_init_ok && (flags & PYPY_INIT_NO_THREADS) == 0) + pthread_once(&once_control_2, pypy_init_threads); + + return _pypy_init_ok ? 
0 : -1; +} + +#else /* --- Windows version --- */ + + XXX + +#endif + + +static void _pypy_init_once(int verbose) +{ + char *home; + int verbose; + rpython_startup_code(); + + home = guess_home(); + if (home == NULL) + return; + _pypy_init_ok = !pypy_setup_home(home, verbose); + free(home); +} + +static void _pypy_init_once_quiet(void) +{ + _pypy_init_once(0); +} + +static void _pypy_init_once_verbose(void) +{ + _pypy_init_once(1); +} +""" +]) diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -65,6 +65,11 @@ if has_stdcall: interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def startup(self, space): + from pypy.module._cffi_backend import cffi1_module + cffi1_module.glob.space = space + cffi1_module.declare_c_function() + def get_dict_rtld_constants(): found = {} diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -1,6 +1,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.entrypoint import entrypoint -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.module import Module from pypy.module._cffi_backend import parse_c_type from pypy.module._cffi_backend.ffi_obj import W_FFIObject @@ -12,14 +13,14 @@ VERSION_EXPORT = 0x0A03 -initfunctype = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) +INITFUNCPTR = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void)) def load_cffi1_module(space, name, path, initptr): # This is called from pypy.module.cpyext.api.load_extension_module() from pypy.module._cffi_backend.call_python import get_ll_cffi_call_python - initfunc = rffi.cast(initfunctype, initptr) + initfunc = rffi.cast(INITFUNCPTR, initptr) with lltype.scoped_alloc(rffi.VOIDPP.TO, 16, zero=True) as p: p[0] = rffi.cast(rffi.VOIDP, VERSION_EXPORT) p[1] = rffi.cast(rffi.VOIDP, get_ll_cffi_call_python()) @@ -38,9 +39,74 @@ w_name = space.wrap(name) module = Module(space, w_name) - module.setdictvalue(space, '__file__', space.wrap(path)) + if path is not None: + module.setdictvalue(space, '__file__', space.wrap(path)) module.setdictvalue(space, 'ffi', space.wrap(ffi)) module.setdictvalue(space, 'lib', space.wrap(lib)) w_modules_dict = space.sys.get('modules') space.setitem(w_modules_dict, w_name, space.wrap(module)) space.setitem(w_modules_dict, space.wrap(name + '.lib'), space.wrap(lib)) + return module + + +# ____________________________________________________________ + + +EMBED_VERSION_MIN = 0xB011 +EMBED_VERSION_MAX = 0xB0FF + +STDERR = 2 +INITSTRUCTPTR = lltype.Ptr(lltype.Struct('CFFI_INIT', + ('name', rffi.CCHARP), + ('func', rffi.VOIDP), + ('code', rffi.CCHARP))) + +def load_embedded_cffi_module(space, version, init_struct): + name = rffi.charp2str(init_struct.name) + if not (VERSION_MIN <= version <= VERSION_MAX): + raise oefmt(space.w_ImportError, + "cffi *embedded* module '%s' has unknown version %s", + name, hex(version)) + module = load_cffi1_module(space, name, None, init_struct.func) + # + code = rffi.charp2str(init_struct.code) + compiler = space.createcompiler() + pycode = compiler.compile(code, "" % name, + 'exec', 0) + w_globals = module.getdict(space) + space.call_method(w_globals, "setdefault", "__builtins__", + self.get_builtin()) + pycode.exec_code(self, w_globals, w_globals) + + +class 
Global: + pass +glob = Global() + + at entrypoint('main', [rffi.INT, rffi.VOIDP], + c_name='_pypy_init_embedded_cffi_module') +def _pypy_init_embedded_cffi_module(version, init_struct): + name = "?" + try: + init_struct = rffi.cast(INITSTRUCTPTR, init_struct) + name = rffi.charp2str(init_struct.name) + version = rffi.cast(lltype.Signed, version) + try: + load_embedded_cffi_module(glob.space, version, init_struct) + res = 0 + except OperationError, operr: + operr.write_unraisable(glob.space, "initialization of '%s'" % name, + with_traceback=True) + res = -1 + except Exception, e: + # oups! last-level attempt to recover. + try: + os.write(STDERR, "From initialization of '") + os.write(STDERR, name) + os.write(STDERR, "':\n") + os.write(STDERR, str(e)) + os.write(STDERR, "\n") + except: + pass + res = -1 + return rffi.cast(rffi.INT, res) From pypy.commits at gmail.com Thu Dec 31 05:44:06 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Dec 2015 02:44:06 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: revert some changes, and roughly finish the logic. Need to think about how to test that Message-ID: <568506f6.034cc20a.16a73.fffff771@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81508:381dbfd502eb Date: 2015-12-30 18:31 +0100 http://bitbucket.org/pypy/pypy/changeset/381dbfd502eb/ Log: revert some changes, and roughly finish the logic. Need to think about how to test that diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -10,6 +10,8 @@ from rpython.config.config import ConflictConfigError from pypy.tool.option import make_objspace from pypy.conftest import pypydir +from rpython.rlib import rthread +from pypy.module.thread import os_thread thisdir = py.path.local(__file__).dirpath() @@ -76,8 +78,107 @@ return 1 return exitcode - from pypy.interpreter import embedding - return entry_point, embedding.capture(space, debug) + # register the minimal equivalent of running a small piece of code. 
This + # should be used as sparsely as possible, just to register callbacks + + from rpython.rlib.entrypoint import entrypoint_highlevel + from rpython.rtyper.lltypesystem import rffi, lltype + + @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], + c_name='pypy_setup_home') + def pypy_setup_home(ll_home, verbose): + from pypy.module.sys.initpath import pypy_find_stdlib + verbose = rffi.cast(lltype.Signed, verbose) + if ll_home: + home1 = rffi.charp2str(ll_home) + home = os.path.join(home1, 'x') # <- so that 'll_home' can be + # directly the root directory + else: + home = home1 = pypydir + w_path = pypy_find_stdlib(space, home) + if space.is_none(w_path): + if verbose: + debug("pypy_setup_home: directories 'lib-python' and 'lib_pypy'" + " not found in '%s' or in any parent directory" % home1) + return rffi.cast(rffi.INT, 1) + space.startup() + space.appexec([w_path], """(path): + import sys + sys.path[:] = path + """) + # import site + try: + space.setattr(space.getbuiltinmodule('sys'), + space.wrap('executable'), + space.wrap(home)) + import_ = space.getattr(space.getbuiltinmodule('__builtin__'), + space.wrap('__import__')) + space.call_function(import_, space.wrap('site')) + return rffi.cast(rffi.INT, 0) + except OperationError, e: + if verbose: + debug("OperationError:") + debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) + return rffi.cast(rffi.INT, -1) + + @entrypoint_highlevel('main', [rffi.CCHARP], c_name='pypy_execute_source') + def pypy_execute_source(ll_source): + return pypy_execute_source_ptr(ll_source, 0) + + @entrypoint_highlevel('main', [rffi.CCHARP, lltype.Signed], + c_name='pypy_execute_source_ptr') + def pypy_execute_source_ptr(ll_source, ll_ptr): + source = rffi.charp2str(ll_source) + res = _pypy_execute_source(source, ll_ptr) + return rffi.cast(rffi.INT, res) + + @entrypoint_highlevel('main', [], c_name='pypy_init_threads') + def pypy_init_threads(): + if not space.config.objspace.usemodules.thread: + return + os_thread.setup_threads(space) + + @entrypoint_highlevel('main', [], c_name='pypy_thread_attach') + def pypy_thread_attach(): + if not space.config.objspace.usemodules.thread: + return + os_thread.setup_threads(space) + os_thread.bootstrapper.acquire(space, None, None) + # XXX this doesn't really work. 
Don't use os.fork(), and + # if your embedder program uses fork(), don't use any PyPy + # code in the fork + rthread.gc_thread_start() + os_thread.bootstrapper.nbthreads += 1 + os_thread.bootstrapper.release() + + def _pypy_execute_source(source, c_argument): + try: + w_globals = space.newdict(module=True) + space.setitem(w_globals, space.wrap('__builtins__'), + space.builtin_modules['__builtin__']) + space.setitem(w_globals, space.wrap('c_argument'), + space.wrap(c_argument)) + space.appexec([space.wrap(source), w_globals], """(src, glob): + import sys + stmt = compile(src, 'c callback', 'exec') + if not hasattr(sys, '_pypy_execute_source'): + sys._pypy_execute_source = [] + sys._pypy_execute_source.append(glob) + exec stmt in glob + """) + except OperationError, e: + debug("OperationError:") + debug(" operror-type: " + e.w_type.getname(space)) + debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) + return -1 + return 0 + + return entry_point, {'pypy_execute_source': pypy_execute_source, + 'pypy_execute_source_ptr': pypy_execute_source_ptr, + 'pypy_init_threads': pypy_init_threads, + 'pypy_thread_attach': pypy_thread_attach, + 'pypy_setup_home': pypy_setup_home} # _____ Define and setup target ___ diff --git a/pypy/interpreter/embedding.py b/pypy/interpreter/embedding.py deleted file mode 100644 --- a/pypy/interpreter/embedding.py +++ /dev/null @@ -1,173 +0,0 @@ -from rpython.rtyper.lltypesystem import rffi, lltype - - -def capture(space, debug): - from rpython.rlib.entrypoint import entrypoint_highlevel - from rpython.rlib import rthread - from pypy.module.thread import os_thread - from pypy.conftest import pypydir - from pypy.module.sys.initpath import pypy_find_stdlib - - @entrypoint_highlevel('main', [rffi.CCHARP, rffi.INT], - c_name='pypy_setup_home') - def pypy_setup_home(ll_home, verbose): - _declare_c_function() - verbose = rffi.cast(lltype.Signed, verbose) - if ll_home: - home1 = rffi.charp2str(ll_home) - home = os.path.join(home1, 'x') # <- so that 'll_home' can be - # directly the root directory - else: - home = home1 = pypydir - w_path = pypy_find_stdlib(space, home) - if space.is_none(w_path): - if verbose: - debug("pypy_setup_home: directories 'lib-python' and 'lib_pypy'" - " not found in '%s' or in any parent directory" % home1) - return rffi.cast(rffi.INT, 1) - space.startup() - space.appexec([w_path], """(path): - import sys - sys.path[:] = path - """) - # import site - try: - space.setattr(space.getbuiltinmodule('sys'), - space.wrap('executable'), - space.wrap(home)) - import_ = space.getattr(space.getbuiltinmodule('__builtin__'), - space.wrap('__import__')) - space.call_function(import_, space.wrap('site')) - return rffi.cast(rffi.INT, 0) - except OperationError, e: - if verbose: - debug("OperationError:") - debug(" operror-type: " + e.w_type.getname(space)) - debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return rffi.cast(rffi.INT, -1) - - @entrypoint_highlevel('main', [rffi.CCHARP], c_name='pypy_execute_source') - def pypy_execute_source(ll_source): - return pypy_execute_source_ptr(ll_source, 0) - - @entrypoint_highlevel('main', [rffi.CCHARP, lltype.Signed], - c_name='pypy_execute_source_ptr') - def pypy_execute_source_ptr(ll_source, ll_ptr): - source = rffi.charp2str(ll_source) - res = _pypy_execute_source(source, ll_ptr) - return rffi.cast(rffi.INT, res) - - @entrypoint_highlevel('main', [], c_name='pypy_init_threads') - def pypy_init_threads(): - if not space.config.objspace.usemodules.thread: - return - 
os_thread.setup_threads(space) - - @entrypoint_highlevel('main', [], c_name='pypy_thread_attach') - def pypy_thread_attach(): - if not space.config.objspace.usemodules.thread: - return - # XXX this doesn't really work. Don't use os.fork(), and - # if your embedder program uses fork(), don't use any PyPy - # code in the fork - rthread.gc_thread_start() - os_thread.bootstrapper.nbthreads += 1 - - def _pypy_execute_source(source, c_argument): - try: - w_globals = space.newdict(module=True) - space.setitem(w_globals, space.wrap('__builtins__'), - space.builtin_modules['__builtin__']) - space.setitem(w_globals, space.wrap('c_argument'), - space.wrap(c_argument)) - space.appexec([space.wrap(source), w_globals], """(src, glob): - import sys - stmt = compile(src, 'c callback', 'exec') - if not hasattr(sys, '_pypy_execute_source'): - sys._pypy_execute_source = [] - sys._pypy_execute_source.append(glob) - exec stmt in glob - """) - except OperationError, e: - debug("OperationError:") - debug(" operror-type: " + e.w_type.getname(space)) - debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return -1 - return 0 - - entrypoints_dict = {'pypy_execute_source': pypy_execute_source, - 'pypy_execute_source_ptr': pypy_execute_source_ptr, - 'pypy_init_threads': pypy_init_threads, - 'pypy_thread_attach': pypy_thread_attach, - 'pypy_setup_home': pypy_setup_home} - - return entrypoints_dict - - -_declare_c_function = rffi.llexternal_use_eci(separate_module_sources=[ -""" -#define PYPY_INIT_NO_THREADS 0x01 -#define PYPY_INIT_QUIET 0x02 - -static char _pypy_init_ok = 0; -static void _pypy_init_once_quiet(void); -static void _pypy_init_once_verbose(void); - - -#ifndef _MSC_VER /* --- Posix version --- */ - -static char *guess_home(void) -{ - Dl_info info; - if (dladdr(&guess_home, &info) == 0) - return NULL; - return realpath(info.dli_fname, NULL); -} - -RPY_EXPORTED -int pypy_initialize(int flags) -{ - static pthread_once_t once_control_1 = PTHREAD_ONCE_INIT; - static pthread_once_t once_control_2 = PTHREAD_ONCE_INIT; - - pthread_once(&once_control_1, - (flags & PYPY_INIT_QUIET) ? _pypy_init_once_quiet - : _pypy_init_once_verbose); - - if (_pypy_init_ok && (flags & PYPY_INIT_NO_THREADS) == 0) - pthread_once(&once_control_2, pypy_init_threads); - - return _pypy_init_ok ? 
0 : -1; -} - -#else /* --- Windows version --- */ - - XXX - -#endif - - -static void _pypy_init_once(int verbose) -{ - char *home; - int verbose; - rpython_startup_code(); - - home = guess_home(); - if (home == NULL) - return; - _pypy_init_ok = !pypy_setup_home(home, verbose); - free(home); -} - -static void _pypy_init_once_quiet(void) -{ - _pypy_init_once(0); -} - -static void _pypy_init_once_verbose(void) -{ - _pypy_init_once(1); -} -""" -]) diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -68,7 +68,6 @@ def startup(self, space): from pypy.module._cffi_backend import cffi1_module cffi1_module.glob.space = space - cffi1_module.declare_c_function() def get_dict_rtld_constants(): diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -62,21 +62,29 @@ ('code', rffi.CCHARP))) def load_embedded_cffi_module(space, version, init_struct): - name = rffi.charp2str(init_struct.name) + from pypy.module._cffi_backend.embedding import declare_c_function + declare_c_function() # translation-time hint only: + # declare _cffi_carefully_make_gil() + # + version = rffi.cast(lltype.Signed, version) if not (VERSION_MIN <= version <= VERSION_MAX): raise oefmt(space.w_ImportError, - "cffi *embedded* module '%s' has unknown version %s", - name, hex(version)) + "cffi embedded module has got unknown version tag %s", + hex(version)) + # + if space.config.objspace.usemodules.thread: + from pypy.module.thread import os_thread + os_thread.setup_threads(space) + # + name = rffi.charp2str(init_struct.name) module = load_cffi1_module(space, name, None, init_struct.func) - # code = rffi.charp2str(init_struct.code) compiler = space.createcompiler() - pycode = compiler.compile(code, "" % name, - 'exec', 0) + pycode = compiler.compile(code, "" % name, 'exec', 0) w_globals = module.getdict(space) - space.call_method(w_globals, "setdefault", "__builtins__", - self.get_builtin()) - pycode.exec_code(self, w_globals, w_globals) + space.call_method(w_globals, "setdefault", space.wrap("__builtins__"), + space.wrap(space.builtin)) + pycode.exec_code(space, w_globals, w_globals) class Global: @@ -90,13 +98,20 @@ try: init_struct = rffi.cast(INITSTRUCTPTR, init_struct) name = rffi.charp2str(init_struct.name) - version = rffi.cast(lltype.Signed, version) + # + space = glob.space try: - load_embedded_cffi_module(glob.space, version, init_struct) + load_embedded_cffi_module(space, version, init_struct) res = 0 except OperationError, operr: - operr.write_unraisable(glob.space, "initialization of '%s'" % name, + operr.write_unraisable(space, "initialization of '%s'" % name, with_traceback=True) + space.appexec([], """(): + import sys + sys.stderr.write('pypy version: %s.%s.%s\n' % + sys.pypy_version_info[:3]) + sys.stderr.write('sys.path: %r\n' % (sys.path,)) + """) res = -1 except Exception, e: # oups! last-level attempt to recover. 
diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py new file mode 100644 --- /dev/null +++ b/pypy/module/_cffi_backend/embedding.py @@ -0,0 +1,58 @@ +from rpython.rtyper.lltypesystem import rffi + + +declare_c_function = rffi.llexternal_use_eci(separate_module_sources=[ +""" +/* XXX Windows missing */ +#include +#include +#include + +static unsigned char _cffi_ready = 0; +static const char *volatile _cffi_module_name; + +static void _cffi_init_error(const char *msg, const char *extra) +{ + fprintf(stderr, + "\nPyPy initialization failure when loading module '%s':\n%s%s\n", + _cffi_module_name, msg, extra); +} + +static void _cffi_init(void) +{ + Dl_info info; + char *home; + + rpython_startup_code(); + + if (dladdr(&_cffi_init, &info) == 0) { + _cffi_init_error("dladdr() failed: ", dlerror()); + return; + } + home = realpath(info.dli_fname, NULL); + if (pypy_setup_home(home, 1) != 0) { + _cffi_init_error("pypy_setup_home() failed", ""); + return; + } + + RPyGilAllocate(); + RPyGilRelease(); + _cffi_ready = 1; +} + +RPY_EXPORTED +int _cffi_carefully_make_gil(const char *name) +{ + /* For CFFI: this initializes the GIL and loads the home path. + It can be called completely concurrently from unrelated threads. + It assumes that we don't hold the GIL before (if it exists), and we + don't hold it afterwards. + */ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + + _cffi_module_name = name; /* not really thread-safe, but better than + nothing */ + pthread_once(&once_control, _cffi_init); + return (int)_cffi_ready - 1; +} +"""]) From pypy.commits at gmail.com Thu Dec 31 05:44:08 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Dec 2015 02:44:08 -0800 (PST) Subject: [pypy-commit] pypy default: Remove the hacks in optimizeopt introduced by 'faster-rstruct' and Message-ID: <568506f8.913bc20a.d29ab.091f@mx.google.com> Author: Armin Rigo Branch: Changeset: r81509:c46508226ad9 Date: 2015-12-31 10:46 +0000 http://bitbucket.org/pypy/pypy/changeset/c46508226ad9/ Log: Remove the hacks in optimizeopt introduced by 'faster-rstruct' and instead add a 'llop.gc_load_indexed' operation that turns into a regular GC_LOAD_INDEXED resop. diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -45,7 +45,7 @@ # the newstr and the strsetitems are because the string is forced, # which is in turn because the optimizer doesn't know how to handle a - # getarrayitem_gc_i on a virtual string. It could be improved, but it + # gc_load_indexed_i on a virtual string. It could be improved, but it # is also true that in real life cases struct.unpack is called on # strings which come from the outside, so it's a minor issue. 
assert loop.match_by_id("unpack", """ @@ -55,17 +55,17 @@ strsetitem(p88, 1, i14) strsetitem(p88, 2, i17) strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + i91 = gc_load_indexed_i(p88, 0, 1, _, -4) """) def test_struct_object(self): def main(n): import struct - s = struct.Struct("i") + s = struct.Struct("ii") i = 1 while i < n: - buf = s.pack(i) # ID: pack - x = s.unpack(buf)[0] # ID: unpack + buf = s.pack(-1, i) # ID: pack + x = s.unpack(buf)[1] # ID: unpack i += x / i return i @@ -88,10 +88,15 @@ assert loop.match_by_id('unpack', """ # struct.unpack - p88 = newstr(4) - strsetitem(p88, 0, i11) - strsetitem(p88, 1, i14) - strsetitem(p88, 2, i17) - strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + p88 = newstr(8) + strsetitem(p88, 0, 255) + strsetitem(p88, 1, 255) + strsetitem(p88, 2, 255) + strsetitem(p88, 3, 255) + strsetitem(p88, 4, i11) + strsetitem(p88, 5, i14) + strsetitem(p88, 6, i17) + strsetitem(p88, 7, i20) + i90 = gc_load_indexed_i(p88, 0, 1, _, -4) + i91 = gc_load_indexed_i(p88, 4, 1, _, -4) """) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -13,6 +13,7 @@ from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper import rclass from rpython.rlib.clibffi import FFI_DEFAULT_ABI @@ -638,18 +639,9 @@ return array.getlength() def bh_getarrayitem_gc(self, a, index, descr): + a = support.cast_arg(lltype.Ptr(descr.A), a) + array = a._obj assert index >= 0 - if descr.A is descr.OUTERA: - a = support.cast_arg(lltype.Ptr(descr.A), a) - else: - # we use rffi.cast instead of support.cast_arg because the types - # might not be "compatible" enough from the lltype point of - # view. 
In particular, this happens when we use - # str_storage_getitem, in which an rpy_string is casted to - # rpy_string_as_Signed (or similar) - a = rffi.cast(lltype.Ptr(descr.OUTERA), a) - a = getattr(a, descr.OUTERA._arrayfld) - array = a._obj return support.cast_result(descr.A.OF, array.getitem(index)) bh_getarrayitem_gc_pure_i = bh_getarrayitem_gc @@ -714,6 +706,24 @@ else: return self.bh_raw_load_i(struct, offset, descr) + def bh_gc_load_indexed_i(self, struct, index, scale, base_ofs, bytes): + if bytes == 1: T = rffi.UCHAR + elif bytes == 2: T = rffi.USHORT + elif bytes == 4: T = rffi.UINT + elif bytes == 8: T = rffi.ULONGLONG + elif bytes == -1: T = rffi.SIGNEDCHAR + elif bytes == -2: T = rffi.SHORT + elif bytes == -4: T = rffi.INT + elif bytes == -8: T = rffi.LONGLONG + else: raise NotImplementedError(bytes) + x = llop.gc_load_indexed(T, struct, index, scale, base_ofs) + return lltype.cast_primitive(lltype.Signed, x) + + def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes): + if bytes != 8: + raise Exception("gc_load_indexed_f is only for 'double'!") + return llop.gc_load_indexed(rffi.DOUBLE, struct, index, scale, base_ofs) + def bh_increment_debug_counter(self, addr): p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) p[0] += 1 diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -725,6 +725,16 @@ def bh_raw_load_f(self, addr, offset, descr): return self.read_float_at_mem(addr, offset) + def bh_gc_load_indexed_i(self, addr, index, scale, base_ofs, bytes): + offset = base_ofs + scale * index + return self.read_int_at_mem(addr, offset, abs(bytes), bytes < 0) + + def bh_gc_load_indexed_f(self, addr, index, scale, base_ofs, bytes): + # only for 'double'! 
+ assert bytes == rffi.sizeof(lltype.Float) + offset = base_ofs + scale * index + return self.read_float_at_mem(addr, offset) + def bh_new(self, sizedescr): return self.gc_ll_descr.gc_malloc(sizedescr) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1021,18 +1021,20 @@ kind = getkind(op.result.concretetype)[0] return SpaceOperation('getinteriorfield_gc_%s' % kind, args, op.result) - elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): - # special-case 2: GcStruct with Array field - v_inst, c_field, v_index = op.args - STRUCT = v_inst.concretetype.TO - ARRAY = getattr(STRUCT, c_field.value) - assert isinstance(ARRAY, lltype.Array) - arraydescr = self.cpu.arraydescrof(STRUCT) - kind = getkind(op.result.concretetype)[0] - assert kind in ('i', 'f') - return SpaceOperation('getarrayitem_gc_%s' % kind, - [op.args[0], v_index, arraydescr], - op.result) + #elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): + # # special-case 2: GcStruct with Array field + # ---was added in the faster-rstruct branch,--- + # ---no longer directly supported--- + # v_inst, c_field, v_index = op.args + # STRUCT = v_inst.concretetype.TO + # ARRAY = getattr(STRUCT, c_field.value) + # assert isinstance(ARRAY, lltype.Array) + # arraydescr = self.cpu.arraydescrof(STRUCT) + # kind = getkind(op.result.concretetype)[0] + # assert kind in ('i', 'f') + # return SpaceOperation('getarrayitem_gc_%s' % kind, + # [op.args[0], v_index, arraydescr], + # op.result) else: assert False, 'not supported' @@ -1084,6 +1086,25 @@ return SpaceOperation('raw_load_%s' % kind, [op.args[0], op.args[1], descr], op.result) + def rewrite_op_gc_load_indexed(self, op): + T = op.result.concretetype + kind = getkind(T)[0] + assert kind != 'r' + descr = self.cpu.arraydescrof(rffi.CArray(T)) + if (not isinstance(op.args[2], Constant) or + not isinstance(op.args[3], Constant)): + raise NotImplementedError("gc_load_indexed: 'scale' and 'base_ofs'" + " should be constants") + # xxx hard-code the size in bytes at translation time, which is + # probably fine and avoids lots of issues later + bytes = descr.get_item_size_in_bytes() + if descr.is_item_signed(): + bytes = -bytes + c_bytes = Constant(bytes, lltype.Signed) + return SpaceOperation('gc_load_indexed_%s' % kind, + [op.args[0], op.args[1], + op.args[2], op.args[3], c_bytes], op.result) + def _rewrite_equality(self, op, opname): arg0, arg1 = op.args if isinstance(arg0, Constant) and not arg0.value: diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1434,6 +1434,13 @@ def bhimpl_raw_load_f(cpu, addr, offset, arraydescr): return cpu.bh_raw_load_f(addr, offset, arraydescr) + @arguments("cpu", "r", "i", "i", "i", "i", returns="i") + def bhimpl_gc_load_indexed_i(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_i(addr, index,scale,base_ofs, bytes) + @arguments("cpu", "r", "i", "i", "i", "i", returns="f") + def bhimpl_gc_load_indexed_f(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_f(addr, index,scale,base_ofs, bytes) + @arguments("r", "d", "d") def bhimpl_record_quasiimmut_field(struct, fielddescr, mutatefielddescr): pass diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ 
b/rpython/jit/metainterp/optimizeopt/heap.py @@ -535,16 +535,10 @@ cf.do_setfield(self, op) def optimize_GETARRAYITEM_GC_I(self, op): - # When using str_storage_getitem it might happen that op.getarg(0) is - # a virtual string, NOT an array. In that case, we cannot cache the - # getarrayitem as if it were an array, obviously. In theory we could - # improve by writing special code to interpter the buffer of the - # virtual string as if it were an array, but it looks complicate, - # fragile and not worth it. arrayinfo = self.ensure_ptr_info_arg0(op) indexb = self.getintbound(op.getarg(1)) cf = None - if indexb.is_constant() and not arrayinfo.is_vstring(): + if indexb.is_constant(): index = indexb.getint() arrayinfo.getlenbound(None).make_gt_const(index) # use the cache on (arraydescr, index), which is a constant @@ -561,7 +555,7 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # the remember the result of reading the array item - if cf is not None and not arrayinfo.is_vstring(): + if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), self.get_box_replacement(op), cf, diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -24,9 +24,6 @@ def is_virtual(self): return False - def is_vstring(self): - return False - def is_precise(self): return False diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -277,10 +277,8 @@ self.emit_operation(op) def optimize_GETARRAYITEM_GC_I(self, op): - # When using str_storage_getitem we op.getarg(0) is a string, NOT an - # array, hence the check. 
In that case, it will be forced opinfo = self.getptrinfo(op.getarg(0)) - if opinfo and opinfo.is_virtual() and not opinfo.is_vstring(): + if opinfo and opinfo.is_virtual(): indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: item = opinfo.getitem(op.getdescr(), indexbox.getint()) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -62,9 +62,6 @@ self.mode = mode self.length = length - def is_vstring(self): - return True - def getlenbound(self, mode): from rpython.jit.metainterp.optimizeopt import intutils diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -810,6 +810,27 @@ return self.execute_with_descr(rop.RAW_LOAD_F, arraydescr, addrbox, offsetbox) + def _remove_symbolics(self, c): + if not we_are_translated(): + from rpython.rtyper.lltypesystem import ll2ctypes + assert isinstance(c, ConstInt) + c = ConstInt(ll2ctypes.lltype2ctypes(c.value)) + return c + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_i(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_I, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_f(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_F, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + @arguments("box") def opimpl_hint_force_virtualizable(self, box): self.metainterp.gen_store_back_in_vable(box) diff --git a/rpython/jit/metainterp/test/test_strstorage.py b/rpython/jit/metainterp/test/test_strstorage.py --- a/rpython/jit/metainterp/test/test_strstorage.py +++ b/rpython/jit/metainterp/test/test_strstorage.py @@ -19,7 +19,7 @@ res = self.interp_operations(f, [], supports_singlefloats=True) # kind = getkind(TYPE)[0] # 'i' or 'f' - self.check_operations_history({'getarrayitem_gc_%s' % kind: 1, + self.check_operations_history({'gc_load_indexed_%s' % kind: 1, 'finish': 1}) # if TYPE == lltype.SingleFloat: @@ -29,8 +29,8 @@ return longlong.int2singlefloat(res) return res - def str_storage_supported(self, TYPE): - py.test.skip('this is not a JIT test') + #def str_storage_supported(self, TYPE): + # py.test.skip('this is not a JIT test') def test_force_virtual_str_storage(self): byteorder = sys.byteorder @@ -48,6 +48,6 @@ 'strsetitem': 1, # str forcing 'call_pure_r': 1, # str forcing (copystrcontent) 'guard_no_exception': 1, # str forcing - 'getarrayitem_gc_i': 1, # str_storage_getitem + 'gc_load_indexed_i': 1, # str_storage_getitem 'finish': 1 }) diff --git a/rpython/rlib/rstruct/nativefmttable.py b/rpython/rlib/rstruct/nativefmttable.py --- a/rpython/rlib/rstruct/nativefmttable.py +++ b/rpython/rlib/rstruct/nativefmttable.py @@ -11,7 +11,6 @@ from rpython.rlib.rstruct.standardfmttable import native_is_bigendian from rpython.rlib.rstruct.error import StructError from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.strstorage import str_storage_getitem from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git 
a/rpython/rlib/rstruct/standardfmttable.py b/rpython/rlib/rstruct/standardfmttable.py --- a/rpython/rlib/rstruct/standardfmttable.py +++ b/rpython/rlib/rstruct/standardfmttable.py @@ -12,7 +12,7 @@ from rpython.rlib.rstruct import ieee from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.strstorage import str_storage_getitem, str_storage_supported +from rpython.rlib.strstorage import str_storage_getitem from rpython.rlib import rarithmetic from rpython.rtyper.lltypesystem import rffi @@ -185,13 +185,14 @@ data = fmtiter.read(size) fmtiter.appendobj(ieee.unpack_float(data, fmtiter.bigendian)) return - if not str_storage_supported(TYPE): - # this happens e.g. on win32 and ARM32: we cannot read the string - # content as an array of doubles because it's not properly - # aligned. But we can read a longlong and convert to float - assert TYPE == rffi.DOUBLE - assert rffi.sizeof(TYPE) == 8 - return unpack_longlong2float(fmtiter) + ## XXX check if the following code is still needed + ## if not str_storage_supported(TYPE): + ## # this happens e.g. on win32 and ARM32: we cannot read the string + ## # content as an array of doubles because it's not properly + ## # aligned. But we can read a longlong and convert to float + ## assert TYPE == rffi.DOUBLE + ## assert rffi.sizeof(TYPE) == 8 + ## return unpack_longlong2float(fmtiter) try: # fast path val = unpack_fastpath(TYPE)(fmtiter) @@ -246,7 +247,7 @@ @specialize.argtype(0) def unpack_int_fastpath_maybe(fmtiter): - if fmtiter.bigendian != native_is_bigendian or not str_storage_supported(TYPE): + if fmtiter.bigendian != native_is_bigendian or not native_is_ieee754: ## or not str_storage_supported(TYPE): return False try: intvalue = unpack_fastpath(TYPE)(fmtiter) diff --git a/rpython/rlib/strstorage.py b/rpython/rlib/strstorage.py --- a/rpython/rlib/strstorage.py +++ b/rpython/rlib/strstorage.py @@ -9,54 +9,31 @@ # rstr.py:copy_string_contents), which has no chance to work during # tracing # -# 2. use llop.raw_load: despite the name, llop.raw_load DOES support reading -# from GC pointers. However: -# -# a. we would like to use a CompositeOffset as the offset (using the -# same logic as in rstr.py:_get_raw_str_buf), but this is not (yet) -# supported before translation: it works only if you pass an actual -# integer -# -# b. raw_load from a GC pointer is not (yet) supported by the -# JIT. There are plans to introduce a gc_load operation: when it -# will be there, we could fix the issue above and actually use it to -# implement str_storage_getitem -# -# 3. the actual solution: cast rpy_string to a GcStruct which has the very +# 2. cast rpy_string to a GcStruct which has the very # same layout, with the only difference that its 'chars' field is no # longer an Array(Char) but e.e. an Array(Signed). Then, we just need to -# read the appropriate index into the array +# read the appropriate index into the array. To support this solution, +# the JIT's optimizer needed a few workarounds. This was removed. +# +# 3. use the newly introduced 'llop.gc_load_indexed'. 
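# Illustrative model in plain Python (an aside on approach 3 above, not part of
# this file): the operation boils down to "reinterpret sizeof(TP) bytes of the
# string at a given byte offset", which ordinary CPython can mimic with
# struct.unpack_from:
#
#     import struct
#
#     def model_str_storage_getitem(fmt, s, byte_offset):
#         # fmt is a native format such as '@l' or '@d'; byte_offset should
#         # be aligned for that type (see the WARNING in the code below)
#         return struct.unpack_from(fmt, s, byte_offset)[0]
#
#     assert model_str_storage_getitem('@l', struct.pack('@ll', 42, 43),
#                                      struct.calcsize('@l')) == 43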
+# -from rpython.rtyper.lltypesystem import lltype, rffi, llmemory -from rpython.rtyper.lltypesystem.rstr import STR, _get_raw_str_buf + +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.annlowlevel import llstr -from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.objectmodel import specialize - at specialize.memo() -def _rpy_string_as_type(TP): - # sanity check that STR is actually what we think it is - assert STR._flds == { - 'hash': lltype.Signed, - 'chars': lltype.Array(lltype.Char, hints={'immutable': True}) - } - STR_AS_TP = lltype.GcStruct('rpy_string_as_%s' % TP, - ('hash', lltype.Signed), - ('chars', lltype.Array(TP, hints={'immutable': True}))) - return STR_AS_TP - - at specialize.arg(0) -def str_storage_supported(TP): - # on some architectures (e.g. win32 and arm32) an array of longlongs needs - # to be aligned at 8 bytes boundaries, so we cannot safely cast from STR - # to STR_AS_TP. In that case, we str_storage_getitem is simply not - # supported - return rffi.sizeof(TP) <= rffi.sizeof(lltype.Signed) @specialize.ll() -def str_storage_getitem(TP, s, index): - assert str_storage_supported(TP) # sanity check - STR_AS_TP = _rpy_string_as_type(TP) +def str_storage_getitem(TP, s, byte_offset): + # WARNING: the 'byte_offset' is, as its name says, measured in bytes; + # however, it should be aligned for TP, otherwise on some platforms this + # code will crash! lls = llstr(s) - str_as_tp = rffi.cast(lltype.Ptr(STR_AS_TP), lls) - index = index / rffi.sizeof(TP) - return str_as_tp.chars[index] + base_ofs = (llmemory.offsetof(STR, 'chars') + + llmemory.itemoffsetof(STR.chars, 0)) + scale_factor = llmemory.sizeof(lltype.Char) + return llop.gc_load_indexed(TP, lls, byte_offset, + scale_factor, base_ofs) diff --git a/rpython/rlib/test/test_strstorage.py b/rpython/rlib/test/test_strstorage.py --- a/rpython/rlib/test/test_strstorage.py +++ b/rpython/rlib/test/test_strstorage.py @@ -2,7 +2,7 @@ import sys import struct from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib.strstorage import str_storage_getitem, str_storage_supported +from rpython.rlib.strstorage import str_storage_getitem from rpython.rlib.rarithmetic import r_singlefloat from rpython.rtyper.test.tool import BaseRtypingTest @@ -10,14 +10,14 @@ class BaseStrStorageTest: - def test_str_getitem_supported(self): - if IS_32BIT: - expected = False - else: - expected = True - # - assert self.str_storage_supported(rffi.LONGLONG) == expected - assert self.str_storage_supported(rffi.DOUBLE) == expected + ## def test_str_getitem_supported(self): + ## if IS_32BIT: + ## expected = False + ## else: + ## expected = True + ## # + ## assert self.str_storage_supported(rffi.LONGLONG) == expected + ## assert self.str_storage_supported(rffi.DOUBLE) == expected def test_signed(self): buf = struct.pack('@ll', 42, 43) @@ -34,8 +34,8 @@ assert int(x) == 43 def test_float(self): - if not str_storage_supported(lltype.Float): - py.test.skip('str_storage_getitem(lltype.Float) not supported on this machine') + ## if not str_storage_supported(lltype.Float): + ## py.test.skip('str_storage_getitem(lltype.Float) not supported on this machine') buf = struct.pack('@dd', 12.3, 45.6) size = struct.calcsize('@d') assert self.str_storage_getitem(lltype.Float, buf, 0) == 12.3 @@ -52,20 +52,45 @@ class TestDirect(BaseStrStorageTest): - def str_storage_supported(self, TYPE): - return 
str_storage_supported(TYPE) + ## def str_storage_supported(self, TYPE): + ## return str_storage_supported(TYPE) def str_storage_getitem(self, TYPE, buf, offset): return str_storage_getitem(TYPE, buf, offset) class TestRTyping(BaseStrStorageTest, BaseRtypingTest): - def str_storage_supported(self, TYPE): - def fn(): - return str_storage_supported(TYPE) - return self.interpret(fn, []) + ## def str_storage_supported(self, TYPE): + ## def fn(): + ## return str_storage_supported(TYPE) + ## return self.interpret(fn, []) def str_storage_getitem(self, TYPE, buf, offset): def fn(offset): return str_storage_getitem(TYPE, buf, offset) return self.interpret(fn, [offset]) + + +class TestCompiled(BaseStrStorageTest): + cache = {} + + def str_storage_getitem(self, TYPE, buf, offset): + if TYPE not in self.cache: + from rpython.translator.c.test.test_genc import compile + + assert isinstance(TYPE, lltype.Primitive) + if TYPE in (lltype.Float, lltype.SingleFloat): + TARGET_TYPE = lltype.Float + else: + TARGET_TYPE = lltype.Signed + + def llf(buf, offset): + x = str_storage_getitem(TYPE, buf, offset) + return lltype.cast_primitive(TARGET_TYPE, x) + + fn = compile(llf, [str, int]) + self.cache[TYPE] = fn + # + fn = self.cache[TYPE] + x = fn(buf, offset) + return lltype.cast_primitive(TYPE, x) diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -902,6 +902,14 @@ llobj = ctypes.sizeof(get_ctypes_type(llobj.TYPE)) * llobj.repeat elif isinstance(llobj, ComputedIntSymbolic): llobj = llobj.compute_fn() + elif isinstance(llobj, llmemory.CompositeOffset): + llobj = sum([lltype2ctypes(c) for c in llobj.offsets]) + elif isinstance(llobj, llmemory.FieldOffset): + CSTRUCT = get_ctypes_type(llobj.TYPE) + llobj = getattr(CSTRUCT, llobj.fldname).offset + elif isinstance(llobj, llmemory.ArrayItemsOffset): + CARRAY = get_ctypes_type(llobj.TYPE) + llobj = CARRAY.items.offset else: raise NotImplementedError(llobj) # don't know about symbolic value diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -417,6 +417,7 @@ 'raw_load': LLOp(sideeffects=False, canrun=True), 'raw_store': LLOp(canrun=True), 'bare_raw_store': LLOp(), + 'gc_load_indexed': LLOp(sideeffects=False, canrun=True), 'stack_malloc': LLOp(), # mmh 'track_alloc_start': LLOp(), 'track_alloc_stop': LLOp(), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -702,6 +702,17 @@ return p[0] op_raw_load.need_result_type = True +def op_gc_load_indexed(TVAL, p, index, scale, base_ofs): + # 'base_ofs' should be a CompositeOffset(..., ArrayItemsOffset). + # 'scale' should be a llmemory.sizeof(). 
+ from rpython.rtyper.lltypesystem import rffi + ofs = base_ofs + scale * index + if isinstance(ofs, int): + return op_raw_load(TVAL, p, ofs) + p = rffi.cast(rffi.CArrayPtr(TVAL), llmemory.cast_ptr_to_adr(p) + ofs) + return p[0] +op_gc_load_indexed.need_result_type = True + def op_likely(x): assert isinstance(x, bool) return x diff --git a/rpython/rtyper/lltypesystem/rbytearray.py b/rpython/rtyper/lltypesystem/rbytearray.py --- a/rpython/rtyper/lltypesystem/rbytearray.py +++ b/rpython/rtyper/lltypesystem/rbytearray.py @@ -8,10 +8,10 @@ def mallocbytearray(size): return lltype.malloc(BYTEARRAY, size) -_, _, _, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY, +_, _, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY, lltype.Char, 'bytearray') -_, _, _, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR, +_, _, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR, BYTEARRAY, lltype.Char, 'bytearray_from_str') diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -136,15 +136,13 @@ copy_raw_to_string = func_with_new_name(copy_raw_to_string, 'copy_raw_to_%s' % name) - return _get_raw_buf, copy_string_to_raw, copy_raw_to_string, copy_string_contents + return copy_string_to_raw, copy_raw_to_string, copy_string_contents -(_get_raw_str_buf, - copy_string_to_raw, +(copy_string_to_raw, copy_raw_to_string, copy_string_contents) = _new_copy_contents_fun(STR, STR, Char, 'string') -(_get_raw_unicode_buf, - copy_unicode_to_raw, +(copy_unicode_to_raw, copy_raw_to_unicode, copy_unicode_contents) = _new_copy_contents_fun(UNICODE, UNICODE, UniChar, 'unicode') diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -299,7 +299,7 @@ def gen_op(self, op): macro = 'OP_%s' % op.opname.upper() line = None - if op.opname.startswith('gc_'): + if op.opname.startswith('gc_') and op.opname != 'gc_load_indexed': meth = getattr(self.gcpolicy, macro, None) if meth: line = meth(self, op) @@ -709,6 +709,19 @@ "%(result)s = ((%(typename)s) (((char *)%(addr)s) + %(offset)s))[0];" % locals()) + def OP_GC_LOAD_INDEXED(self, op): + addr = self.expr(op.args[0]) + index = self.expr(op.args[1]) + scale = self.expr(op.args[2]) + base_ofs = self.expr(op.args[3]) + result = self.expr(op.result) + TYPE = op.result.concretetype + typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '') + return ( + "%(result)s = ((%(typename)s) (((char *)%(addr)s) + " + "%(base_ofs)s + %(scale)s * %(index)s))[0];" + % locals()) + def OP_CAST_PRIMITIVE(self, op): TYPE = self.lltypemap(op.result) val = self.expr(op.args[0]) From pypy.commits at gmail.com Thu Dec 31 05:43:44 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Dec 2015 02:43:44 -0800 (PST) Subject: [pypy-commit] cffi static-callback-embedding: more tweaks; tests needed... Message-ID: <568506e0.6a69c20a.d649d.ffffe676@mx.google.com> Author: Armin Rigo Branch: static-callback-embedding Changeset: r2501:c3084bd75943 Date: 2015-12-30 22:36 +0100 http://bitbucket.org/cffi/cffi/changeset/c3084bd75943/ Log: more tweaks; tests needed... 
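The diff below tweaks the one-time, thread-safe initialization path (_cffi_carefully_make_gil and the new pypy_carefully_make_gil). As a rough plain-Python model of that pattern (illustrative only; the real code relies on pthread_once or a compare-and-swap loop, not a Python lock):

    import threading

    _once_lock = threading.Lock()
    _ready = False

    def carefully_make_gil(do_init):
        # may be called concurrently from unrelated threads; do_init runs once
        global _ready
        with _once_lock:
            if not _ready:
                do_init()
                _ready = True
        return 0   # 0 means "ready", mirroring the int return value introduced below

Every entry point that might be reached first would call carefully_make_gil(initialize_python) before doing anything else.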
diff --git a/cffi/_embedding.h b/cffi/_embedding.h --- a/cffi/_embedding.h +++ b/cffi/_embedding.h @@ -195,8 +195,9 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { - PyFile_WriteString("\ncffi version: 1.3.1", f); - PyFile_WriteString("\n_cffi_backend module: ", f); + PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME + "\ncompiled with cffi version: 1.4.2" + "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); if (mod == NULL) { @@ -218,7 +219,7 @@ PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */ -static void _cffi_carefully_make_gil(void) +static int _cffi_carefully_make_gil(void) { /* This initializes the GIL. It can be called completely concurrently from unrelated threads. It assumes that we don't @@ -267,6 +268,7 @@ while (!cffi_compare_and_swap(lock, old_value + 1, old_value)) ; #endif + return 0; } /********** end CPython-specific section **********/ @@ -289,10 +291,12 @@ _CFFI_PYTHON_STARTUP_CODE, }; +extern int pypy_carefully_make_gil(const char *); extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *); -static void _cffi_carefully_make_gil(void) +static int _cffi_carefully_make_gil(void) { + return pypy_carefully_make_gil(_CFFI_MODULE_NAME); } static int _cffi_initialize_python(void) @@ -345,14 +349,16 @@ */ static char called = 0; - _cffi_carefully_make_gil(); + if (_cffi_carefully_make_gil() != 0) + return NULL; + _cffi_acquire_reentrant_mutex(); /* Here the GIL exists, but we don't have it. We're only protected from concurrency by the reentrant mutex. */ - /* This file ignores subinterpreters and can only initialize the - embedded module once, in the main interpreter. */ + /* This file only initializes the embedded module once, the first + time this is called, even if there are subinterpreters. */ if (!called) { called = 1; /* invoke _cffi_initialize_python() only once, but don't set '_cffi_call_python' right now, @@ -365,13 +371,13 @@ '_cffi_call_python' without also seeing the rest of the data initialized. However, this is not possible. But the new value of '_cffi_call_python' is the function - 'cffi_call_python()' from _cffi_backend. We can put a - write barrier here, and a corresponding read barrier at - the start of cffi_call_python(). This ensures that - after that read barrier, we see everything done here - before the write barrier. + 'cffi_call_python()' from _cffi_backend. So: */ + cffi_write_barrier(); + /* ^^^ we put a write barrier here, and a corresponding + read barrier at the start of cffi_call_python(). This + ensures that after that read barrier, we see everything + done here before the write barrier. 
*/ - cffi_write_barrier(); assert(_cffi_call_python_org != NULL); _cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org; diff --git a/testing/cffi0/test_version.py b/testing/cffi0/test_version.py --- a/testing/cffi0/test_version.py +++ b/testing/cffi0/test_version.py @@ -53,3 +53,10 @@ content = open(p).read() #v = BACKEND_VERSIONS.get(v, v) assert (('assert __version__ == "%s"' % v) in content) + +def test_embedding_h(): + parent = os.path.dirname(os.path.dirname(cffi.__file__)) + v = cffi.__version__ + p = os.path.join(parent, 'cffi', '_embedding.h') + content = open(p).read() + assert ('cffi version: %s"' % (v,)) in content From pypy.commits at gmail.com Thu Dec 31 07:42:02 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Dec 2015 04:42:02 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <5685229a.9a6f1c0a.5f3c6.1ffb@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r685:0c65712acd39 Date: 2015-12-31 13:41 +0100 http://bitbucket.org/pypy/pypy.org/changeset/0c65712acd39/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $61634 of $105000 (58.7%) + $61639 of $105000 (58.7%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Thu Dec 31 10:55:53 2015 From: pypy.commits at gmail.com (mjacob) Date: Thu, 31 Dec 2015 07:55:53 -0800 (PST) Subject: [pypy-commit] pypy llvm-translation-backend: Implement inhibit_tail_call marker. Message-ID: <56855009.53ad1c0a.cd9fe.ffffe9e5@mx.google.com> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r81510:56cf0a68905a Date: 2015-12-31 16:55 +0100 http://bitbucket.org/pypy/pypy/changeset/56cf0a68905a/ Log: Implement inhibit_tail_call marker. This raises the required LLVM version to 3.8 (as of now unreleased). diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1216,10 +1216,17 @@ tmp.append('{arg.TV}'.format(arg=arg)) args = ', '.join(tmp) + tailmarker = '' + try: + if fn.value._obj.graph.inhibit_tail_call: + tailmarker = 'notail ' + except AttributeError: + pass + if result.type is LLVMVoid: - fmt = 'call void {fn.V}({args})' + fmt = '{tailmarker}call void {fn.V}({args})' else: - fmt = '{result.V} = call {result.T} {fn.V}({args})' + fmt = '{result.V} = {tailmarker}call {result.T} {fn.V}({args})' self.w(fmt.format(**locals())) op_indirect_call = op_direct_call diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -655,6 +655,16 @@ assert not fc(r_longfloat(0.0)) assert fc(r_longfloat(1.0)) + def test_recursive_notail(self): + def f(n): + if n <= 0: + return 42 + return f(n+1) + def entry_point(): + return f(1) + fc = self.getcompiled(entry_point, []) + fc(expected_exception_name='StackOverflow') + class TestLowLevelTypeLLVM(_LLVMMixin, test_lltyped.TestLowLevelType): def getcompiled(self, func, argtypes): From pypy.commits at gmail.com Thu Dec 31 10:59:48 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Dec 2015 07:59:48 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: Translation fixes, manual testing... Message-ID: <568550f4.cf821c0a.e2c36.ffffc4fe@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81511:d5c92fe3af5b Date: 2015-12-31 15:30 +0000 http://bitbucket.org/pypy/pypy/changeset/d5c92fe3af5b/ Log: Translation fixes, manual testing... 
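The translation fixes below move the embedding glue into embedding.py and register the C-level entry point already at import time. A minimal plain-Python sketch of the module-level holder this relies on (illustrative only; the names are stand-ins for the interpreter-level objects):

    class Global(object):
        space = None

    glob = Global()

    def startup(space):
        # runs once at interpreter startup and stashes the object space
        glob.space = space

    def init_embedded_module(version, init_struct, loader):
        # reached much later, from C, through the registered entry point
        return loader(glob.space, version, init_struct)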
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,6 +1,7 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload, clibffi +from rpython.rlib import rdynload, clibffi, entrypoint +from rpython.rtyper.lltypesystem import rffi VERSION = "1.4.2" @@ -66,8 +67,8 @@ interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL def startup(self, space): - from pypy.module._cffi_backend import cffi1_module - cffi1_module.glob.space = space + from pypy.module._cffi_backend import embedding + embedding.glob.space = space def get_dict_rtld_constants(): @@ -82,3 +83,11 @@ for _name, _value in get_dict_rtld_constants().items(): Module.interpleveldefs[_name] = 'space.wrap(%d)' % _value + + +# write this entrypoint() here, to make sure it is registered early enough + at entrypoint.entrypoint_highlevel('main', [rffi.INT, rffi.VOIDP], + c_name='pypy_init_embedded_cffi_module') +def pypy_init_embedded_cffi_module(version, init_struct): + from pypy.module._cffi_backend import embedding + return embedding.pypy_init_embedded_cffi_module(version, init_struct) diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -1,7 +1,7 @@ +import os from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib.entrypoint import entrypoint -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import oefmt from pypy.interpreter.module import Module from pypy.module._cffi_backend import parse_c_type from pypy.module._cffi_backend.ffi_obj import W_FFIObject @@ -47,81 +47,3 @@ space.setitem(w_modules_dict, w_name, space.wrap(module)) space.setitem(w_modules_dict, space.wrap(name + '.lib'), space.wrap(lib)) return module - - -# ____________________________________________________________ - - -EMBED_VERSION_MIN = 0xB011 -EMBED_VERSION_MAX = 0xB0FF - -STDERR = 2 -INITSTRUCTPTR = lltype.Ptr(lltype.Struct('CFFI_INIT', - ('name', rffi.CCHARP), - ('func', rffi.VOIDP), - ('code', rffi.CCHARP))) - -def load_embedded_cffi_module(space, version, init_struct): - from pypy.module._cffi_backend.embedding import declare_c_function - declare_c_function() # translation-time hint only: - # declare _cffi_carefully_make_gil() - # - version = rffi.cast(lltype.Signed, version) - if not (VERSION_MIN <= version <= VERSION_MAX): - raise oefmt(space.w_ImportError, - "cffi embedded module has got unknown version tag %s", - hex(version)) - # - if space.config.objspace.usemodules.thread: - from pypy.module.thread import os_thread - os_thread.setup_threads(space) - # - name = rffi.charp2str(init_struct.name) - module = load_cffi1_module(space, name, None, init_struct.func) - code = rffi.charp2str(init_struct.code) - compiler = space.createcompiler() - pycode = compiler.compile(code, "" % name, 'exec', 0) - w_globals = module.getdict(space) - space.call_method(w_globals, "setdefault", space.wrap("__builtins__"), - space.wrap(space.builtin)) - pycode.exec_code(space, w_globals, w_globals) - - -class Global: - pass -glob = Global() - - at entrypoint('main', [rffi.INT, rffi.VOIDP], - c_name='_pypy_init_embedded_cffi_module') -def _pypy_init_embedded_cffi_module(version, init_struct): - name = "?" 
- try: - init_struct = rffi.cast(INITSTRUCTPTR, init_struct) - name = rffi.charp2str(init_struct.name) - # - space = glob.space - try: - load_embedded_cffi_module(space, version, init_struct) - res = 0 - except OperationError, operr: - operr.write_unraisable(space, "initialization of '%s'" % name, - with_traceback=True) - space.appexec([], """(): - import sys - sys.stderr.write('pypy version: %s.%s.%s\n' % - sys.pypy_version_info[:3]) - sys.stderr.write('sys.path: %r\n' % (sys.path,)) - """) - res = -1 - except Exception, e: - # oups! last-level attempt to recover. - try: - os.write(STDERR, "From initialization of '") - os.write(STDERR, name) - os.write(STDERR, "':\n") - os.write(STDERR, str(e)) - os.write(STDERR, "\n") - except: - pass - res = -1 - return rffi.cast(rffi.INT, res) diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ b/pypy/module/_cffi_backend/embedding.py @@ -1,13 +1,97 @@ -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from pypy.interpreter.error import OperationError, oefmt -declare_c_function = rffi.llexternal_use_eci(separate_module_sources=[ -""" +# ____________________________________________________________ + + +EMBED_VERSION_MIN = 0xB011 +EMBED_VERSION_MAX = 0xB0FF + +STDERR = 2 +INITSTRUCTPTR = lltype.Ptr(lltype.Struct('CFFI_INIT', + ('name', rffi.CCHARP), + ('func', rffi.VOIDP), + ('code', rffi.CCHARP))) + +def load_embedded_cffi_module(space, version, init_struct): + from pypy.module._cffi_backend.cffi1_module import load_cffi1_module + declare_c_function() # translation-time hint only: + # declare _cffi_carefully_make_gil() + # + version = rffi.cast(lltype.Signed, version) + if not (EMBED_VERSION_MIN <= version <= EMBED_VERSION_MAX): + raise oefmt(space.w_ImportError, + "cffi embedded module has got unknown version tag %s", + hex(version)) + # + if space.config.objspace.usemodules.thread: + from pypy.module.thread import os_thread + os_thread.setup_threads(space) + # + name = rffi.charp2str(init_struct.name) + module = load_cffi1_module(space, name, None, init_struct.func) + code = rffi.charp2str(init_struct.code) + compiler = space.createcompiler() + pycode = compiler.compile(code, "" % name, 'exec', 0) + w_globals = module.getdict(space) + space.call_method(w_globals, "setdefault", space.wrap("__builtins__"), + space.wrap(space.builtin)) + pycode.exec_code(space, w_globals, w_globals) + + +class Global: + pass +glob = Global() + +def pypy_init_embedded_cffi_module(version, init_struct): + # called from __init__.py + name = "?" + try: + init_struct = rffi.cast(INITSTRUCTPTR, init_struct) + name = rffi.charp2str(init_struct.name) + # + space = glob.space + try: + load_embedded_cffi_module(space, version, init_struct) + res = 0 + except OperationError, operr: + operr.write_unraisable(space, "initialization of '%s'" % name, + with_traceback=True) + space.appexec([], r"""(): + import sys + sys.stderr.write('pypy version: %s.%s.%s\n' % + sys.pypy_version_info[:3]) + sys.stderr.write('sys.path: %r\n' % (sys.path,)) + """) + res = -1 + except Exception, e: + # oups! last-level attempt to recover. 
+ try: + os.write(STDERR, "From initialization of '") + os.write(STDERR, name) + os.write(STDERR, "':\n") + os.write(STDERR, str(e)) + os.write(STDERR, "\n") + except: + pass + res = -1 + return rffi.cast(rffi.INT, res) + +# ____________________________________________________________ + + +eci = ExternalCompilationInfo(separate_module_sources=[ +r""" /* XXX Windows missing */ #include #include #include +RPY_EXPORTED void rpython_startup_code(void); +RPY_EXPORTED int pypy_setup_home(char *, int); + static unsigned char _cffi_ready = 0; static const char *volatile _cffi_module_name; @@ -24,6 +108,8 @@ char *home; rpython_startup_code(); + RPyGilAllocate(); + RPyGilRelease(); if (dladdr(&_cffi_init, &info) == 0) { _cffi_init_error("dladdr() failed: ", dlerror()); @@ -34,14 +120,11 @@ _cffi_init_error("pypy_setup_home() failed", ""); return; } - - RPyGilAllocate(); - RPyGilRelease(); _cffi_ready = 1; } RPY_EXPORTED -int _cffi_carefully_make_gil(const char *name) +int pypy_carefully_make_gil(const char *name) { /* For CFFI: this initializes the GIL and loads the home path. It can be called completely concurrently from unrelated threads. @@ -56,3 +139,5 @@ return (int)_cffi_ready - 1; } """]) + +declare_c_function = rffi.llexternal_use_eci(eci) From pypy.commits at gmail.com Thu Dec 31 10:59:49 2015 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Dec 2015 07:59:49 -0800 (PST) Subject: [pypy-commit] pypy cffi-static-callback-embedding: Translation fix Message-ID: <568550f5.6408c20a.7346.6f19@mx.google.com> Author: Armin Rigo Branch: cffi-static-callback-embedding Changeset: r81512:e4b349baee63 Date: 2015-12-31 16:02 +0000 http://bitbucket.org/pypy/pypy/changeset/e4b349baee63/ Log: Translation fix diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py --- a/pypy/module/_cffi_backend/cffi1_module.py +++ b/pypy/module/_cffi_backend/cffi1_module.py @@ -1,4 +1,3 @@ -import os from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import oefmt diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ b/pypy/module/_cffi_backend/embedding.py @@ -1,3 +1,4 @@ +import os from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo From pypy.commits at gmail.com Thu Dec 31 11:50:18 2015 From: pypy.commits at gmail.com (sbauman) Date: Thu, 31 Dec 2015 08:50:18 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Sync with master Message-ID: <56855cca.05bd1c0a.a703c.4710@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81514:b131bc726e2b Date: 2015-12-31 11:49 -0500 http://bitbucket.org/pypy/pypy/changeset/b131bc726e2b/ Log: Sync with master diff too long, truncating to 2000 out of 2109 lines diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.1 +Version: 1.4.2 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.1" -__version_info__ = (1, 4, 1) +__version__ = "1.4.2" +__version_info__ = (1, 4, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/pypy/module/_cffi_backend/cglob.py b/pypy/module/_cffi_backend/cglob.py --- a/pypy/module/_cffi_backend/cglob.py +++ b/pypy/module/_cffi_backend/cglob.py @@ -3,6 +3,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend import newtype +from rpython.rlib import rgil from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -26,7 +27,9 @@ if not we_are_translated(): FNPTR = rffi.CCallback([], rffi.VOIDP) fetch_addr = rffi.cast(FNPTR, self.fetch_addr) + rgil.release() result = fetch_addr() + rgil.acquire() else: # careful in translated versions: we need to call fetch_addr, # but in a GIL-releasing way. The easiest is to invoke a diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -423,7 +423,9 @@ exchange_offset += rffi.getintfield(self.atypes[i], 'c_size') # store the exchange data size - cif_descr.exchange_size = exchange_offset + # we also align it to the next multiple of 8, in an attempt to + # work around bugs(?) 
of libffi (see cffi issue #241) + cif_descr.exchange_size = self.align_arg(exchange_offset) def fb_extra_fields(self, cif_descr): cif_descr.abi = self.fabi diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -386,7 +386,7 @@ return @cpython_api([PyObject, PyObject], PyObject, - error=lltype.nullptr(rffi.VOIDP.TO), external=True) + external=True) @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,)) def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -414,15 +414,26 @@ return NULL; } PyObject *name = PyString_FromString("attr1"); - PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name); - if (attr1->ob_ival != value->ob_ival) + PyIntObject *attr = obj->ob_type->tp_getattro(obj, name); + if (attr->ob_ival != value->ob_ival) { PyErr_SetString(PyExc_ValueError, "tp_getattro returned wrong value"); return NULL; } Py_DECREF(name); - Py_DECREF(attr1); + Py_DECREF(attr); + name = PyString_FromString("attr2"); + attr = obj->ob_type->tp_getattro(obj, name); + if (attr == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) + { + PyErr_Clear(); + } else { + PyErr_SetString(PyExc_ValueError, + "tp_getattro should have raised"); + return NULL; + } + Py_DECREF(name); Py_RETURN_TRUE; ''' ) @@ -637,7 +648,7 @@ IntLikeObject *intObj; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; @@ -657,7 +668,7 @@ IntLikeObjectNoOp *intObjNoOp; long intval; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type_NoOp.tp_flags |= Py_TPFLAGS_CHECKTYPES; diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -45,7 +45,7 @@ # the newstr and the strsetitems are because the string is forced, # which is in turn because the optimizer doesn't know how to handle a - # getarrayitem_gc_i on a virtual string. It could be improved, but it + # gc_load_indexed_i on a virtual string. It could be improved, but it # is also true that in real life cases struct.unpack is called on # strings which come from the outside, so it's a minor issue. 
assert loop.match_by_id("unpack", """ @@ -55,17 +55,17 @@ strsetitem(p88, 1, i14) strsetitem(p88, 2, i17) strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + i91 = gc_load_indexed_i(p88, 0, 1, _, -4) """) def test_struct_object(self): def main(n): import struct - s = struct.Struct("i") + s = struct.Struct("ii") i = 1 while i < n: - buf = s.pack(i) # ID: pack - x = s.unpack(buf)[0] # ID: unpack + buf = s.pack(-1, i) # ID: pack + x = s.unpack(buf)[1] # ID: unpack i += x / i return i @@ -88,10 +88,15 @@ assert loop.match_by_id('unpack', """ # struct.unpack - p88 = newstr(4) - strsetitem(p88, 0, i11) - strsetitem(p88, 1, i14) - strsetitem(p88, 2, i17) - strsetitem(p88, 3, i20) - i91 = getarrayitem_gc_i(p88, 0, descr=) + p88 = newstr(8) + strsetitem(p88, 0, 255) + strsetitem(p88, 1, 255) + strsetitem(p88, 2, 255) + strsetitem(p88, 3, 255) + strsetitem(p88, 4, i11) + strsetitem(p88, 5, i14) + strsetitem(p88, 6, i17) + strsetitem(p88, 7, i20) + i90 = gc_load_indexed_i(p88, 0, 1, _, -4) + i91 = gc_load_indexed_i(p88, 4, 1, _, -4) """) diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -100,6 +100,7 @@ self.argtypes = argtypes def __call__(self, funcdesc, inputcells): + from rpython.rlib.objectmodel import NOT_CONSTANT from rpython.rtyper.lltypesystem import lltype args_s = [] from rpython.annotator import model as annmodel @@ -115,6 +116,9 @@ args_s.append(s_input) elif argtype is None: args_s.append(inputcells[i]) # no change + elif argtype is NOT_CONSTANT: + from rpython.annotator.model import not_const + args_s.append(not_const(inputcells[i])) else: args_s.append(annotation(argtype, bookkeeper=funcdesc.bookkeeper)) if len(inputcells) != len(args_s): diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -804,7 +804,7 @@ base_loc = self.make_sure_var_in_reg(boxes[0], boxes) ofs = boxes[1].getint() value_loc = self.make_sure_var_in_reg(boxes[2], boxes) - size = abs(boxes[3].getint()) + size = boxes[3].getint() ofs_size = default_imm_size if size < 8 else VMEM_imm_size if check_imm_arg(ofs, size=ofs_size): ofs_loc = imm(ofs) @@ -849,7 +849,7 @@ index_loc = self.make_sure_var_in_reg(boxes[1], boxes) assert boxes[3].getint() == 1 # scale ofs = boxes[4].getint() - size = abs(boxes[5].getint()) + size = boxes[5].getint() assert check_imm_arg(ofs) return [value_loc, base_loc, index_loc, imm(size), imm(ofs)] diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -13,6 +13,7 @@ from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper import rclass from rpython.rlib.clibffi import FFI_DEFAULT_ABI @@ -635,18 +636,9 @@ return array.getlength() def bh_getarrayitem_gc(self, a, index, descr): + a = support.cast_arg(lltype.Ptr(descr.A), a) + array = a._obj assert index >= 0 - if descr.A is descr.OUTERA: - a = support.cast_arg(lltype.Ptr(descr.A), a) - else: - # we use rffi.cast instead of support.cast_arg because the types - # might not be "compatible" enough from the lltype point of - # view. 
In particular, this happens when we use - # str_storage_getitem, in which an rpy_string is casted to - # rpy_string_as_Signed (or similar) - a = rffi.cast(lltype.Ptr(descr.OUTERA), a) - a = getattr(a, descr.OUTERA._arrayfld) - array = a._obj return support.cast_result(descr.A.OF, array.getitem(index)) bh_getarrayitem_gc_pure_i = bh_getarrayitem_gc @@ -711,6 +703,24 @@ else: return self.bh_raw_load_i(struct, offset, descr) + def bh_gc_load_indexed_i(self, struct, index, scale, base_ofs, bytes): + if bytes == 1: T = rffi.UCHAR + elif bytes == 2: T = rffi.USHORT + elif bytes == 4: T = rffi.UINT + elif bytes == 8: T = rffi.ULONGLONG + elif bytes == -1: T = rffi.SIGNEDCHAR + elif bytes == -2: T = rffi.SHORT + elif bytes == -4: T = rffi.INT + elif bytes == -8: T = rffi.LONGLONG + else: raise NotImplementedError(bytes) + x = llop.gc_load_indexed(T, struct, index, scale, base_ofs) + return lltype.cast_primitive(lltype.Signed, x) + + def bh_gc_load_indexed_f(self, struct, index, scale, base_ofs, bytes): + if bytes != 8: + raise Exception("gc_load_indexed_f is only for 'double'!") + return llop.gc_load_indexed(rffi.DOUBLE, struct, index, scale, base_ofs) + def bh_increment_debug_counter(self, addr): p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr) p[0] += 1 diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -725,6 +725,16 @@ def bh_raw_load_f(self, addr, offset, descr): return self.read_float_at_mem(addr, offset) + def bh_gc_load_indexed_i(self, addr, index, scale, base_ofs, bytes): + offset = base_ofs + scale * index + return self.read_int_at_mem(addr, offset, abs(bytes), bytes < 0) + + def bh_gc_load_indexed_f(self, addr, index, scale, base_ofs, bytes): + # only for 'double'! 
+ assert bytes == rffi.sizeof(lltype.Float) + offset = base_ofs + scale * index + return self.read_float_at_mem(addr, offset) + def bh_new(self, sizedescr): return self.gc_ll_descr.gc_malloc(sizedescr) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -126,11 +126,11 @@ def emit_gc_store_or_indexed(self, op, ptr_box, index_box, value_box, itemsize, factor, offset): factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(index_box, + self._emit_mul_if_factor_offset_not_supported(index_box, factor, offset) # - if factor == 1 and offset == 0: - args = [ptr_box, index_box, value_box, ConstInt(itemsize)] + if index_box is None: + args = [ptr_box, ConstInt(offset), value_box, ConstInt(itemsize)] newload = ResOperation(rop.GC_STORE, args) else: args = [ptr_box, index_box, value_box, ConstInt(factor), @@ -153,18 +153,15 @@ index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) - def _emit_mul_add_if_factor_offset_not_supported(self, index_box, factor, offset): - orig_factor = factor - # factor - must_manually_load_const = False # offset != 0 and not self.cpu.load_constant_offset - if factor != 1 and (factor not in self.cpu.load_supported_factors or \ - (not index_box.is_constant() and must_manually_load_const)): - # enter here if the factor is supported by the cpu - # OR the index is not constant and a new resop must be emitted - # to add the offset - if isinstance(index_box, ConstInt): - index_box = ConstInt(index_box.value * factor) - else: + def _emit_mul_if_factor_offset_not_supported(self, index_box, + factor, offset): + # Returns (factor, offset, index_box) where index_box is either + # a non-constant BoxInt or None. 
+ if isinstance(index_box, ConstInt): + return 1, index_box.value * factor + offset, None + else: + if factor != 1 and factor not in self.cpu.load_supported_factors: + # the factor is supported by the cpu # x & (x - 1) == 0 is a quick test for power of 2 assert factor > 0 if (factor & (factor - 1)) == 0: @@ -174,20 +171,13 @@ index_box = ResOperation(rop.INT_MUL, [index_box, ConstInt(factor)]) self.emit_op(index_box) - factor = 1 - # adjust the constant offset - #if must_manually_load_const: - # if isinstance(index_box, ConstInt): - # index_box = ConstInt(index_box.value + offset) - # else: - # index_box = ResOperation(rop.INT_ADD, [index_box, ConstInt(offset)]) - # self.emit_op(index_box) - # offset = 0 - return factor, offset, index_box + factor = 1 + return factor, offset, index_box - def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, factor, offset, sign, type='i'): + def emit_gc_load_or_indexed(self, op, ptr_box, index_box, itemsize, + factor, offset, sign, type='i'): factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(index_box, + self._emit_mul_if_factor_offset_not_supported(index_box, factor, offset) # if sign: @@ -197,8 +187,8 @@ optype = type if op is not None: optype = op.type - if factor == 1 and offset == 0: - args = [ptr_box, index_box, ConstInt(itemsize)] + if index_box is None: + args = [ptr_box, ConstInt(offset), ConstInt(itemsize)] newload = ResOperation(OpHelpers.get_gc_load(optype), args) else: args = [ptr_box, index_box, ConstInt(factor), @@ -547,9 +537,8 @@ ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_depth) if sign: size = -size - args = [ConstInt(frame_info), ConstInt(0), ConstInt(1), - ConstInt(ofs), ConstInt(size)] - size = ResOperation(rop.GC_LOAD_INDEXED_I, args) + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = ResOperation(rop.NEW_ARRAY, [size], descr=descrs.arraydescr) @@ -560,9 +549,8 @@ ofs, size, sign = unpack_fielddescr(descrs.jfi_frame_size) if sign: size = -size - args = [ConstInt(frame_info), ConstInt(0), ConstInt(1), - ConstInt(ofs), ConstInt(size)] - size = ResOperation(rop.GC_LOAD_INDEXED_I, args) + args = [ConstInt(frame_info), ConstInt(ofs), ConstInt(size)] + size = ResOperation(rop.GC_LOAD_I, args) self.emit_op(size) frame = self.gen_malloc_nursery_varsize_frame(size) self.gen_initialize_tid(frame, descrs.arraydescr.tid) @@ -612,15 +600,12 @@ descr = self.cpu.getarraydescr_for_frame(arg.type) assert self.cpu.JITFRAME_FIXED_SIZE & 1 == 0 _, itemsize, _ = self.cpu.unpack_arraydescr_size(descr) - index = index_list[i] // itemsize # index is in bytes - # emit GC_LOAD_INDEXED - itemsize, basesize, _ = unpack_arraydescr(descr) - factor, offset, index_box = \ - self._emit_mul_add_if_factor_offset_not_supported(ConstInt(index), - itemsize, basesize) - args = [frame, index_box, arg, ConstInt(factor), - ConstInt(offset), ConstInt(itemsize)] - self.emit_op(ResOperation(rop.GC_STORE_INDEXED, args)) + array_offset = index_list[i] # index, already measured in bytes + # emit GC_STORE + _, basesize, _ = unpack_arraydescr(descr) + offset = basesize + array_offset + args = [frame, ConstInt(offset), arg, ConstInt(itemsize)] + self.emit_op(ResOperation(rop.GC_STORE, args)) descr = op.getdescr() assert isinstance(descr, JitCellToken) diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ 
b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -30,13 +30,26 @@ class RewriteTests(object): def check_rewrite(self, frm_operations, to_operations, **namespace): - def trans_getarray_to_load(descr): - size = descr.basesize - if descr.is_item_signed(): - size = -size - return ','.join([str(n) for n in [descr.itemsize, - descr.basesize, - size]]) + def setfield(baseptr, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + return 'gc_store(%s, %d, %s, %d)' % (baseptr, descr.offset, + newvalue, descr.field_size) + def setarrayitem(baseptr, index, newvalue, descr): + assert isinstance(baseptr, str) + assert isinstance(index, (str, int)) + assert isinstance(newvalue, (str, int)) + assert not isinstance(descr, (str, int)) + if isinstance(index, int): + offset = descr.basesize + index * descr.itemsize + return 'gc_store(%s, %d, %s, %d)' % (baseptr, offset, + newvalue, descr.itemsize) + else: + return 'gc_store_indexed(%s, %s, %s, %d, %d, %s)' % ( + baseptr, index, newvalue, + descr.itemsize, descr.basesize, descr.itemsize) + # WORD = globals()['WORD'] S = lltype.GcStruct('S', ('x', lltype.Signed), ('y', lltype.Signed)) @@ -376,7 +389,7 @@ gc_store(p1, 0, 5678, 8) p2 = nursery_ptr_increment(p1, %(tdescr.size)d) gc_store(p2, 0, 1234, 8) - gc_store(p1, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) + %(setfield('p1', 0, tdescr.gc_fielddescrs[0]))s jump() """) @@ -485,7 +498,7 @@ """, """ [i0] p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr) - gc_store_indexed(p0, 0, i0, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) gc_store(p0, 0, 0, %(strlendescr.field_size)s) jump(i0) """) @@ -611,19 +624,19 @@ %(strdescr.basesize + 16 * strdescr.itemsize + \ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) gc_store(p0, 0, %(strdescr.tid)d, %(tiddescr.field_size)s) - gc_store_indexed(p0, 0, 14, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p0, %(strlendescr.offset)s, 14, %(strlendescr.field_size)s) gc_store(p0, 0, 0, %(strhashdescr.field_size)s) p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) gc_store(p1, 0, %(unicodedescr.tid)d, %(tiddescr.field_size)s) - gc_store_indexed(p1, 0, 10, 1, %(unicodelendescr.offset)s, %(unicodelendescr.field_size)s) + gc_store(p1, %(unicodelendescr.offset)s, 10, %(unicodelendescr.field_size)s) gc_store(p1, 0, 0, %(unicodehashdescr.field_size)s) p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\ descr=unicodedescr) - gc_store_indexed(p2, 0, i2, 1, %(unicodelendescr.offset)s, %(unicodelendescr.field_size)s) + gc_store(p2, %(unicodelendescr.offset)s, i2, %(unicodelendescr.field_size)s) gc_store(p2, 0, 0, %(unicodehashdescr.field_size)s) p3 = call_malloc_nursery_varsize(1, 1, i2, \ descr=strdescr) - gc_store_indexed(p3, 0, i2, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p3, %(strlendescr.offset)s, i2, %(strlendescr.field_size)s) gc_store(p3, 0, 0, %(strhashdescr.field_size)s) jump() """) @@ -636,7 +649,7 @@ """, """ [p1, p2] cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, 0, p2, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p1, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump() """) @@ -650,7 +663,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', 
cdescr))s jump() """) @@ -671,7 +684,7 @@ zero_array(p1, 0, 129, descr=cdescr) call_n(123456) cond_call_gc_wb(p1, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -693,7 +706,7 @@ zero_array(p1, 0, 130, descr=cdescr) call_n(123456) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -705,7 +718,7 @@ """, """ [p1, i2, p3] cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -725,7 +738,7 @@ zero_array(p1, 0, 5, descr=cdescr) label(p1, i2, p3) cond_call_gc_wb_array(p1, i2, descr=wbdescr) - gc_store_indexed(p1, i2, p3, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p1', 'i2', 'p3', cdescr))s jump() """) @@ -743,12 +756,12 @@ size = interiorzdescr.arraydescr.itemsize self.check_rewrite(""" [p1, p2] - setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) + setinteriorfield_gc(p1, 7, p2, descr=interiorzdescr) jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb_array(p1, 0, descr=wbdescr) - gc_store_indexed(p1, 0, p2, %(scale)s, %(offset)s, %(size)s) + cond_call_gc_wb_array(p1, 7, descr=wbdescr) + gc_store(p1, %(offset + 7 * scale)s, p2, %(size)s) jump(p1, p2) """, interiorzdescr=interiorzdescr, scale=scale, offset=offset, size=size) @@ -763,7 +776,7 @@ [p1] p0 = call_malloc_nursery(%(tdescr.size)d) gc_store(p0, 0, 5678, %(tiddescr.field_size)s) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -781,7 +794,7 @@ p1 = nursery_ptr_increment(p0, %(tdescr.size)d) gc_store(p1, 0, 1234, %(tiddescr.field_size)s) # <<>> - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -798,7 +811,7 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - gc_store_indexed(p0, i2, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 'i2', 'p1', cdescr))s jump() """) @@ -816,8 +829,8 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 2, 3, descr=cdescr) - gc_store_indexed(p0, 1, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 0, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p1', cdescr))s + %(setarrayitem('p0', 0, 'p2', cdescr))s jump() """) @@ -835,8 +848,8 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 3, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 4, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s jump() """) @@ -855,9 +868,9 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 0, 5, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 2, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -878,11 +891,11 @@ 
gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 5, 0, descr=cdescr) - gc_store_indexed(p0, 3, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 4, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 2, p2, %(trans_getarray_to_load(cdescr))s) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 3, 'p1', cdescr))s + %(setarrayitem('p0', 4, 'p2', cdescr))s + %(setarrayitem('p0', 0, 'p1', cdescr))s + %(setarrayitem('p0', 2, 'p2', cdescr))s + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -901,10 +914,10 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 0, 'p1', cdescr))s call_n(321321) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -923,10 +936,10 @@ gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) zero_array(p0, 1, 4, descr=cdescr) - gc_store_indexed(p0, 0, p1, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 0, 'p1', cdescr))s label(p0, p2) cond_call_gc_wb_array(p0, 1, descr=wbdescr) - gc_store_indexed(p0, 1, p2, %(trans_getarray_to_load(cdescr))s) + %(setarrayitem('p0', 1, 'p2', cdescr))s jump() """) @@ -955,7 +968,7 @@ gc_store(p0, 0, i3, %(blendescr.field_size)s) zero_array(p0, 0, i3, descr=bdescr) cond_call_gc_wb_array(p0, 0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(bdescr.basesize)s, 1) + %(setarrayitem('p0', 0, 'p1', bdescr))s jump() """) @@ -991,10 +1004,10 @@ gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) p1 = call_malloc_nursery_varsize(1, 1, i0, \ descr=strdescr) - gc_store_indexed(p1, 0, i0, 1, %(strlendescr.offset)s, %(strlendescr.field_size)s) + gc_store(p1, %(strlendescr.offset)s, i0, %(strlendescr.field_size)s) gc_store(p1, 0, 0, %(strhashdescr.field_size)s) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -1012,7 +1025,7 @@ gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) label(p0, p1) cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) jump() """) @@ -1025,8 +1038,8 @@ """, """ [p0, p1, p2] cond_call_gc_wb(p0, descr=wbdescr) - gc_store_indexed(p0, 0, p1, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) - gc_store_indexed(p0, 0, p2, 1, %(tzdescr.offset)s, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p1, %(tzdescr.field_size)s) + gc_store(p0, %(tzdescr.offset)s, p2, %(tzdescr.field_size)s) jump(p1, p2, p0) """) @@ -1036,20 +1049,20 @@ i2 = call_assembler_i(i0, f0, descr=casmdescr) """, """ [i0, f0] - i1 = gc_load_indexed_i(ConstClass(frame_info), 0, 1, 1, %(jfi_frame_size.field_size)s) + i1 = gc_load_i(ConstClass(frame_info), %(jfi_frame_size.offset)s, %(jfi_frame_size.field_size)s) p1 = call_malloc_nursery_varsize_frame(i1) gc_store(p1, 0, 0, %(tiddescr.field_size)s) - i2 = gc_load_indexed_i(ConstClass(frame_info), 0, 1, 1, %(jfi_frame_depth.field_size)s) - gc_store_indexed(p1, 0, 0, 1, 1, 
%(jf_extra_stack_depth.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_savedata.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_force_descr.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_descr.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_guard_exc.field_size)s) - gc_store_indexed(p1, 0, NULL, 1, 1, %(jf_forward.field_size)s) + i2 = gc_load_i(ConstClass(frame_info), %(jfi_frame_depth.offset)s, %(jfi_frame_depth.field_size)s) + %(setfield('p1', 0, jf_extra_stack_depth))s + %(setfield('p1', 'NULL', jf_savedata))s + %(setfield('p1', 'NULL', jf_force_descr))s + %(setfield('p1', 'NULL', jf_descr))s + %(setfield('p1', 'NULL', jf_guard_exc))s + %(setfield('p1', 'NULL', jf_forward))s gc_store(p1, 0, i2, %(framelendescr.field_size)s) - gc_store_indexed(p1, 0, ConstClass(frame_info), 1, 1, %(jf_frame_info.field_size)s) - gc_store_indexed(p1, 0, i0, 8, 3, 8) - gc_store_indexed(p1, 1, f0, 8, 5, 8) + %(setfield('p1', 'ConstClass(frame_info)', jf_frame_info))s + gc_store(p1, 3, i0, 8) + gc_store(p1, 13, f0, 8) i3 = call_assembler_i(p1, descr=casmdescr) """) @@ -1101,7 +1114,7 @@ p0 = call_malloc_nursery(%(tdescr.size)d) gc_store(p0, 0, 5678, %(tiddescr.field_size)s) gc_store(p0, %(tdescr.gc_fielddescrs[0].offset)s, 0, %(tdescr.gc_fielddescrs[0].offset)s) - p1 = gc_load_indexed_r(p0, 0, 1, %(tzdescr.field_size)s, %(tzdescr.field_size)s) + p1 = gc_load_r(p0, %(tzdescr.offset)s, %(tzdescr.field_size)s) jump(p1) """) @@ -1155,23 +1168,19 @@ # 'i5 = int_add(i1,%(raw_sfdescr.basesize)s);' # 'gc_store(p0,i5,i2,%(raw_sfdescr.itemsize)s)'], [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' - 'i3 = gc_load_indexed_f(p0,0,1,%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = getfield_gc_f(p0,descr=ydescr)' '->' - 'i3 = gc_load_indexed_f(p0,0,1,%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = setfield_raw(p0,i1,descr=ydescr)' '->' - 'i3 = gc_store_indexed(p0,0,i1,1,' - '%(ydescr.offset)s,%(ydescr.field_size)s)'], - [True, (1,2,4,8), 'i3 = setfield_gc(p0,p0,descr=zdescr)' '->' + 'i3 = gc_load_f(p0,%(ydescr.offset)s,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_raw(p0,i1,descr=ydescr)' '->' + 'gc_store(p0,%(ydescr.offset)s,i1,%(ydescr.field_size)s)'], + [True, (1,2,4,8), 'setfield_gc(p0,p0,descr=zdescr)' '->' 'cond_call_gc_wb(p0, descr=wbdescr);' - 'i3 = gc_store_indexed(p0,0,p0,1,' - '%(zdescr.offset)s,%(zdescr.field_size)s)'], + 'gc_store(p0,%(zdescr.offset)s,p0,%(zdescr.field_size)s)'], [False, (1,), 'i3 = arraylen_gc(p0, descr=adescr)' '->' 'i3 = gc_load_i(p0,0,%(adescr.itemsize)s)'], #[False, (1,), 'i3 = strlen(p0)' '->' # 'i3 = gc_load_i(p0,' # '%(strlendescr.offset)s,%(strlendescr.field_size)s)'], [True, (1,), 'i3 = strlen(p0)' '->' - 'i3 = gc_load_indexed_i(p0,0,1,' + 'i3 = gc_load_i(p0,' '%(strlendescr.offset)s,' '%(strlendescr.field_size)s)'], #[False, (1,), 'i3 = unicodelen(p0)' '->' @@ -1179,7 +1188,7 @@ # '%(unicodelendescr.offset)s,' # '%(unicodelendescr.field_size)s)'], [True, (1,), 'i3 = unicodelen(p0)' '->' - 'i3 = gc_load_indexed_i(p0,0,1,' + 'i3 = gc_load_i(p0,' '%(unicodelendescr.offset)s,' '%(unicodelendescr.field_size)s)'], diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -20,7 +20,7 @@ PPCBuilder, PPCGuardToken) from rpython.jit.backend.ppc.regalloc import TempPtr, TempInt from rpython.jit.backend.llsupport import symbolic, jitframe -from 
rpython.jit.backend.llsupport.descr import InteriorFieldDescr, CallDescr +from rpython.jit.backend.llsupport.descr import CallDescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref @@ -706,8 +706,10 @@ _mixin_ = True - def _write_to_mem(self, value_loc, base_loc, ofs, size): - if size.value == 8: + def _write_to_mem(self, value_loc, base_loc, ofs, size_loc): + assert size_loc.is_imm() + size = size_loc.value + if size == 8: if value_loc.is_fp_reg(): if ofs.is_imm(): self.mc.stfd(value_loc.value, base_loc.value, ofs.value) @@ -718,17 +720,17 @@ self.mc.std(value_loc.value, base_loc.value, ofs.value) else: self.mc.stdx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if ofs.is_imm(): self.mc.stw(value_loc.value, base_loc.value, ofs.value) else: self.mc.stwx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if ofs.is_imm(): self.mc.sth(value_loc.value, base_loc.value, ofs.value) else: self.mc.sthx(value_loc.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.stb(value_loc.value, base_loc.value, ofs.value) else: @@ -736,18 +738,35 @@ else: assert 0, "size not supported" - def emit_setfield_gc(self, op, arglocs, regalloc): - value_loc, base_loc, ofs, size = arglocs - self._write_to_mem(value_loc, base_loc, ofs, size) + def emit_gc_store(self, op, arglocs, regalloc): + value_loc, base_loc, ofs_loc, size_loc = arglocs + self._write_to_mem(value_loc, base_loc, ofs_loc, size_loc) - emit_setfield_raw = emit_setfield_gc - emit_zero_ptr_field = emit_setfield_gc + def _apply_offset(self, index_loc, ofs_loc): + # If offset != 0 then we have to add it here. Note that + # mc.addi() would not be valid with operand r0. + assert ofs_loc.is_imm() # must be an immediate... 
+ assert _check_imm_arg(ofs_loc.getint()) # ...that fits 16 bits + assert index_loc is not r.SCRATCH2 + # (simplified version of _apply_scale()) + if ofs_loc.value > 0: + self.mc.addi(r.SCRATCH2.value, index_loc.value, ofs_loc.value) + index_loc = r.SCRATCH2 + return index_loc - def _load_from_mem(self, res, base_loc, ofs, size, signed): + def emit_gc_store_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, value_loc, ofs_loc, size_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._write_to_mem(value_loc, base_loc, index_loc, size_loc) + + def _load_from_mem(self, res, base_loc, ofs, size_loc, sign_loc): # res, base_loc, ofs, size and signed are all locations assert base_loc is not r.SCRATCH - sign = signed.value - if size.value == 8: + assert size_loc.is_imm() + size = size_loc.value + assert sign_loc.is_imm() + sign = sign_loc.value + if size == 8: if res.is_fp_reg(): if ofs.is_imm(): self.mc.lfd(res.value, base_loc.value, ofs.value) @@ -758,7 +777,7 @@ self.mc.ld(res.value, base_loc.value, ofs.value) else: self.mc.ldx(res.value, base_loc.value, ofs.value) - elif size.value == 4: + elif size == 4: if IS_PPC_64 and sign: if ofs.is_imm(): self.mc.lwa(res.value, base_loc.value, ofs.value) @@ -769,7 +788,7 @@ self.mc.lwz(res.value, base_loc.value, ofs.value) else: self.mc.lwzx(res.value, base_loc.value, ofs.value) - elif size.value == 2: + elif size == 2: if sign: if ofs.is_imm(): self.mc.lha(res.value, base_loc.value, ofs.value) @@ -780,7 +799,7 @@ self.mc.lhz(res.value, base_loc.value, ofs.value) else: self.mc.lhzx(res.value, base_loc.value, ofs.value) - elif size.value == 1: + elif size == 1: if ofs.is_imm(): self.mc.lbz(res.value, base_loc.value, ofs.value) else: @@ -790,22 +809,28 @@ else: assert 0, "size not supported" - def _genop_getfield(self, op, arglocs, regalloc): - base_loc, ofs, res, size, sign = arglocs - self._load_from_mem(res, base_loc, ofs, size, sign) + def _genop_gc_load(self, op, arglocs, regalloc): + base_loc, ofs_loc, res_loc, size_loc, sign_loc = arglocs + self._load_from_mem(res_loc, base_loc, ofs_loc, size_loc, sign_loc) - emit_getfield_gc_i = _genop_getfield - emit_getfield_gc_r = _genop_getfield - emit_getfield_gc_f = _genop_getfield - emit_getfield_gc_pure_i = _genop_getfield - emit_getfield_gc_pure_r = _genop_getfield - emit_getfield_gc_pure_f = _genop_getfield - emit_getfield_raw_i = _genop_getfield - emit_getfield_raw_f = _genop_getfield + emit_gc_load_i = _genop_gc_load + emit_gc_load_r = _genop_gc_load + emit_gc_load_f = _genop_gc_load + + def _genop_gc_load_indexed(self, op, arglocs, regalloc): + base_loc, index_loc, res_loc, ofs_loc, size_loc, sign_loc = arglocs + index_loc = self._apply_offset(index_loc, ofs_loc) + self._load_from_mem(res_loc, base_loc, index_loc, size_loc, sign_loc) + + emit_gc_load_indexed_i = _genop_gc_load_indexed + emit_gc_load_indexed_r = _genop_gc_load_indexed + emit_gc_load_indexed_f = _genop_gc_load_indexed SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) def _multiply_by_constant(self, loc, multiply_by, scratch_loc): + # XXX should die together with _apply_scale() but can't because + # of emit_zero_array() and malloc_cond_varsize() at the moment assert loc.is_reg() if multiply_by == 1: return loc @@ -827,6 +852,9 @@ return scratch_loc def _apply_scale(self, ofs, index_loc, itemsize): + # XXX should die now that getarrayitem and getinteriorfield are gone + # but can't because of emit_zero_array() at the moment + # For arrayitem and interiorfield reads and writes: this returns an # offset 
suitable for use in ld/ldx or similar instructions. # The result will be either the register r2 or a 16-bit immediate. @@ -857,44 +885,6 @@ index_loc = r.SCRATCH2 return index_loc - def _genop_getarray_or_interiorfield(self, op, arglocs, regalloc): - (base_loc, index_loc, res_loc, ofs_loc, - itemsize, fieldsize, fieldsign) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._load_from_mem(res_loc, base_loc, ofs_loc, fieldsize, fieldsign) - - emit_getinteriorfield_gc_i = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_r = _genop_getarray_or_interiorfield - emit_getinteriorfield_gc_f = _genop_getarray_or_interiorfield - - def emit_setinteriorfield_gc(self, op, arglocs, regalloc): - (base_loc, index_loc, value_loc, ofs_loc, - itemsize, fieldsize) = arglocs - ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) - self._write_to_mem(value_loc, base_loc, ofs_loc, fieldsize) - - emit_setinteriorfield_raw = emit_setinteriorfield_gc - - def emit_arraylen_gc(self, op, arglocs, regalloc): - res, base_loc, ofs = arglocs - self.mc.load(res.value, base_loc.value, ofs.value) - - emit_setarrayitem_gc = emit_setinteriorfield_gc - emit_setarrayitem_raw = emit_setarrayitem_gc - - emit_getarrayitem_gc_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_f = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_i = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_r = _genop_getarray_or_interiorfield - emit_getarrayitem_gc_pure_f = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_i = _genop_getarray_or_interiorfield - emit_getarrayitem_raw_f = _genop_getarray_or_interiorfield - - emit_raw_store = emit_setarrayitem_gc - emit_raw_load_i = _genop_getarray_or_interiorfield - emit_raw_load_f = _genop_getarray_or_interiorfield - def _copy_in_scratch2(self, loc): if loc.is_imm(): self.mc.li(r.SCRATCH2.value, loc.value) @@ -998,10 +988,6 @@ _mixin_ = True - emit_strlen = FieldOpAssembler._genop_getfield - emit_strgetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_strsetitem = FieldOpAssembler.emit_setarrayitem_gc - def emit_copystrcontent(self, op, arglocs, regalloc): self._emit_copycontent(arglocs, is_unicode=False) @@ -1059,12 +1045,8 @@ class UnicodeOpAssembler(object): - _mixin_ = True - - emit_unicodelen = FieldOpAssembler._genop_getfield - emit_unicodegetitem = FieldOpAssembler._genop_getarray_or_interiorfield - emit_unicodesetitem = FieldOpAssembler.emit_setarrayitem_gc + # empty! 
class AllocOpAssembler(object): diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -17,12 +17,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import ArrayDescr +from rpython.jit.backend.llsupport.descr import unpack_arraydescr import rpython.jit.backend.ppc.register as r import rpython.jit.backend.ppc.condition as c -from rpython.jit.backend.llsupport.descr import unpack_arraydescr -from rpython.jit.backend.llsupport.descr import unpack_fielddescr -from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import debug_print @@ -691,159 +688,69 @@ src_locations2, dst_locations2, fptmploc) return [] - def prepare_setfield_gc(self, op): - ofs, size, _ = unpack_fielddescr(op.getdescr()) + def prepare_gc_store(self, op): base_loc = self.ensure_reg(op.getarg(0)) - value_loc = self.ensure_reg(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [value_loc, base_loc, ofs_loc, imm(size)] + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + size_loc = self.ensure_reg_or_any_imm(op.getarg(3)) + return [value_loc, base_loc, ofs_loc, size_loc] - prepare_setfield_raw = prepare_setfield_gc + def _prepare_gc_load(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) + self.free_op_vars() + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(2) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, ofs_loc, res_loc, size_loc, imm(sign)] - def _prepare_getfield(self, op): - ofs, size, sign = unpack_fielddescr(op.getdescr()) + prepare_gc_load_i = _prepare_gc_load + prepare_gc_load_r = _prepare_gc_load + prepare_gc_load_f = _prepare_gc_load + + def prepare_gc_store_indexed(self, op): base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + value_loc = self.ensure_reg(op.getarg(2)) + assert op.getarg(3).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(4)) + assert ofs_loc.is_imm() # the arg(4) should always be a small constant + size_loc = self.ensure_reg_or_any_imm(op.getarg(5)) + return [base_loc, index_loc, value_loc, ofs_loc, size_loc] + + def _prepare_gc_load_indexed(self, op): + base_loc = self.ensure_reg(op.getarg(0)) + index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) + assert op.getarg(2).getint() == 1 # scale + ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(3)) + assert ofs_loc.is_imm() # the arg(3) should always be a small constant self.free_op_vars() - res = self.force_allocate_reg(op) - return [base_loc, ofs_loc, res, imm(size), imm(sign)] + res_loc = self.force_allocate_reg(op) + size_box = op.getarg(4) + assert isinstance(size_box, ConstInt) + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: + sign = 1 + else: + sign = 0 + return [base_loc, index_loc, res_loc, ofs_loc, size_loc, imm(sign)] - prepare_getfield_gc_i = 
_prepare_getfield - prepare_getfield_gc_r = _prepare_getfield - prepare_getfield_gc_f = _prepare_getfield - prepare_getfield_raw_i = _prepare_getfield - prepare_getfield_raw_f = _prepare_getfield - prepare_getfield_gc_pure_i = _prepare_getfield - prepare_getfield_gc_pure_r = _prepare_getfield - prepare_getfield_gc_pure_f = _prepare_getfield + prepare_gc_load_indexed_i = _prepare_gc_load_indexed + prepare_gc_load_indexed_r = _prepare_gc_load_indexed + prepare_gc_load_indexed_f = _prepare_gc_load_indexed def prepare_increment_debug_counter(self, op): base_loc = self.ensure_reg(op.getarg(0)) temp_loc = r.SCRATCH2 return [base_loc, temp_loc] - def _prepare_getinteriorfield(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, sign = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(itemsize), imm(fieldsize), imm(sign)] - - prepare_getinteriorfield_gc_i = _prepare_getinteriorfield - prepare_getinteriorfield_gc_r = _prepare_getinteriorfield - prepare_getinteriorfield_gc_f = _prepare_getinteriorfield - - def prepare_setinteriorfield_gc(self, op): - t = unpack_interiorfielddescr(op.getdescr()) - ofs, itemsize, fieldsize, _ = t - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(itemsize), imm(fieldsize)] - - prepare_setinteriorfield_raw = prepare_setinteriorfield_gc - - def prepare_arraylen_gc(self, op): - arraydescr = op.getdescr() - assert isinstance(arraydescr, ArrayDescr) - ofs = arraydescr.lendescr.offset - assert _check_imm_arg(ofs) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - res = self.force_allocate_reg(op) - return [res, base_loc, imm(ofs)] - - def prepare_setarrayitem_gc(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - imm_size = imm(size) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - - prepare_setarrayitem_raw = prepare_setarrayitem_gc - - def prepare_raw_store(self, op): - size, ofs, _ = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, index_loc, value_loc, ofs_loc, - imm(1), imm(size)] - - def _prepare_getarrayitem(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(size) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(sign)] - - prepare_getarrayitem_gc_i = _prepare_getarrayitem - prepare_getarrayitem_gc_r = _prepare_getarrayitem - prepare_getarrayitem_gc_f = _prepare_getarrayitem - prepare_getarrayitem_raw_i = _prepare_getarrayitem - prepare_getarrayitem_raw_f = _prepare_getarrayitem - 
prepare_getarrayitem_gc_pure_i = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_r = _prepare_getarrayitem - prepare_getarrayitem_gc_pure_f = _prepare_getarrayitem - - def _prepare_raw_load(self, op): - size, ofs, sign = unpack_arraydescr(op.getdescr()) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, index_loc, result_loc, ofs_loc, - imm(1), imm(size), imm(sign)] - - prepare_raw_load_i = _prepare_raw_load - prepare_raw_load_f = _prepare_raw_load - - def prepare_strlen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_strgetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_strsetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.STR, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - def prepare_copystrcontent(self, op): src_ptr_loc = self.ensure_reg(op.getarg(0)) dst_ptr_loc = self.ensure_reg(op.getarg(1)) @@ -856,37 +763,6 @@ prepare_copyunicodecontent = prepare_copystrcontent - def prepare_unicodelen(self, op): - basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] - - def prepare_unicodegetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - self.free_op_vars() - result_loc = self.force_allocate_reg(op) - imm_size = imm(itemsize) - return [base_loc, index_loc, result_loc, ofs_loc, - imm_size, imm_size, imm(0)] - - def prepare_unicodesetitem(self, op): - basesize, itemsize, _ = symbolic.get_array_token(rstr.UNICODE, - self.cpu.translate_support_code) - base_loc = self.ensure_reg(op.getarg(0)) - index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) - value_loc = self.ensure_reg(op.getarg(2)) - ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) - imm_size = imm(itemsize) - return [base_loc, index_loc, value_loc, ofs_loc, - imm_size, imm_size] - prepare_same_as_i = helper.prepare_unary_op prepare_same_as_r = helper.prepare_unary_op prepare_same_as_f = helper.prepare_unary_op @@ -1078,12 +954,6 @@ arglocs = self._prepare_guard(op) return arglocs - def prepare_zero_ptr_field(self, op): - 
base_loc = self.ensure_reg(op.getarg(0)) - ofs_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) - value_loc = self.ensure_reg(ConstInt(0)) - return [value_loc, base_loc, ofs_loc, imm(WORD)] - def prepare_zero_array(self, op): itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -21,6 +21,9 @@ IS_64_BIT = True backend_name = 'ppc64' + # can an ISA instruction handle a factor to the offset? + load_supported_factors = (1,) + from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE frame_reg = r.SP all_reg_indexes = [-1] * 32 diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -4,8 +4,7 @@ import os, sys from rpython.jit.backend.llsupport import symbolic -from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, - unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) +from rpython.jit.backend.llsupport.descr import CallDescr, unpack_arraydescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempVar, compute_vars_longevity, is_comparison_or_ovf_op, @@ -1039,7 +1038,8 @@ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) size_box = op.getarg(3) assert isinstance(size_box, ConstInt) - size = abs(size_box.value) + size = size_box.value + assert size >= 1 if size == 1: need_lower_byte = True else: @@ -1061,7 +1061,8 @@ assert isinstance(size_box, ConstInt) factor = scale_box.value offset = offset_box.value - size = abs(size_box.value) + size = size_box.value + assert size >= 1 if size == 1: need_lower_byte = True else: @@ -1083,9 +1084,9 @@ result_loc = self.force_allocate_reg(op) size_box = op.getarg(2) assert isinstance(size_box, ConstInt) - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 @@ -1108,9 +1109,9 @@ assert isinstance(size_box, ConstInt) scale = scale_box.value offset = offset_box.value - size = size_box.value - size_loc = imm(abs(size)) - if size < 0: + nsize = size_box.value # negative for "signed" + size_loc = imm(abs(nsize)) + if nsize < 0: sign_loc = imm1 else: sign_loc = imm0 diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1021,18 +1021,20 @@ kind = getkind(op.result.concretetype)[0] return SpaceOperation('getinteriorfield_gc_%s' % kind, args, op.result) - elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): - # special-case 2: GcStruct with Array field - v_inst, c_field, v_index = op.args - STRUCT = v_inst.concretetype.TO - ARRAY = getattr(STRUCT, c_field.value) - assert isinstance(ARRAY, lltype.Array) - arraydescr = self.cpu.arraydescrof(STRUCT) - kind = getkind(op.result.concretetype)[0] - assert kind in ('i', 'f') - return SpaceOperation('getarrayitem_gc_%s' % kind, - [op.args[0], v_index, arraydescr], - op.result) + #elif isinstance(op.args[0].concretetype.TO, lltype.GcStruct): + # # special-case 2: GcStruct with Array field + # ---was added in the faster-rstruct branch,--- + # ---no longer directly supported--- + # 
v_inst, c_field, v_index = op.args + # STRUCT = v_inst.concretetype.TO + # ARRAY = getattr(STRUCT, c_field.value) + # assert isinstance(ARRAY, lltype.Array) + # arraydescr = self.cpu.arraydescrof(STRUCT) + # kind = getkind(op.result.concretetype)[0] + # assert kind in ('i', 'f') + # return SpaceOperation('getarrayitem_gc_%s' % kind, + # [op.args[0], v_index, arraydescr], + # op.result) else: assert False, 'not supported' @@ -1084,6 +1086,25 @@ return SpaceOperation('raw_load_%s' % kind, [op.args[0], op.args[1], descr], op.result) + def rewrite_op_gc_load_indexed(self, op): + T = op.result.concretetype + kind = getkind(T)[0] + assert kind != 'r' + descr = self.cpu.arraydescrof(rffi.CArray(T)) + if (not isinstance(op.args[2], Constant) or + not isinstance(op.args[3], Constant)): + raise NotImplementedError("gc_load_indexed: 'scale' and 'base_ofs'" + " should be constants") + # xxx hard-code the size in bytes at translation time, which is + # probably fine and avoids lots of issues later + bytes = descr.get_item_size_in_bytes() + if descr.is_item_signed(): + bytes = -bytes + c_bytes = Constant(bytes, lltype.Signed) + return SpaceOperation('gc_load_indexed_%s' % kind, + [op.args[0], op.args[1], + op.args[2], op.args[3], c_bytes], op.result) + def _rewrite_equality(self, op, opname): arg0, arg1 = op.args if isinstance(arg0, Constant) and not arg0.value: diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1434,6 +1434,13 @@ def bhimpl_raw_load_f(cpu, addr, offset, arraydescr): return cpu.bh_raw_load_f(addr, offset, arraydescr) + @arguments("cpu", "r", "i", "i", "i", "i", returns="i") + def bhimpl_gc_load_indexed_i(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_i(addr, index,scale,base_ofs, bytes) + @arguments("cpu", "r", "i", "i", "i", "i", returns="f") + def bhimpl_gc_load_indexed_f(cpu, addr, index, scale, base_ofs, bytes): + return cpu.bh_gc_load_indexed_f(addr, index,scale,base_ofs, bytes) + @arguments("r", "d", "d") def bhimpl_record_quasiimmut_field(struct, fielddescr, mutatefielddescr): pass diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -68,8 +68,8 @@ return box.value def repr_rpython(box, typechars): - return '%s/%s%d' % (box._get_hash_(), typechars, - compute_unique_id(box)) + return '%s/%s' % (box._get_hash_(), typechars, + ) #compute_unique_id(box)) class XxxAbstractValue(object): diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -526,16 +526,10 @@ cf.do_setfield(self, op) def optimize_GETARRAYITEM_GC_I(self, op): - # When using str_storage_getitem it might happen that op.getarg(0) is - # a virtual string, NOT an array. In that case, we cannot cache the - # getarrayitem as if it were an array, obviously. In theory we could - # improve by writing special code to interpter the buffer of the - # virtual string as if it were an array, but it looks complicate, - # fragile and not worth it. 
arrayinfo = self.ensure_ptr_info_arg0(op) indexb = self.getintbound(op.getarg(1)) cf = None - if indexb.is_constant() and not arrayinfo.is_vstring(): + if indexb.is_constant(): index = indexb.getint() arrayinfo.getlenbound(None).make_gt_const(index) # use the cache on (arraydescr, index), which is a constant @@ -552,7 +546,7 @@ self.make_nonnull(op.getarg(0)) self.emit_operation(op) # the remember the result of reading the array item - if cf is not None and not arrayinfo.is_vstring(): + if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), self.get_box_replacement(op), cf, diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -24,9 +24,6 @@ def is_virtual(self): return False - def is_vstring(self): - return False - def is_precise(self): return False diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -271,10 +271,8 @@ self.emit_operation(op) def optimize_GETARRAYITEM_GC_I(self, op): - # When using str_storage_getitem we op.getarg(0) is a string, NOT an - # array, hence the check. In that case, it will be forced opinfo = self.getptrinfo(op.getarg(0)) - if opinfo and opinfo.is_virtual() and not opinfo.is_vstring(): + if opinfo and opinfo.is_virtual(): indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: item = opinfo.getitem(op.getdescr(), indexbox.getint()) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -62,9 +62,6 @@ self.mode = mode self.length = length - def is_vstring(self): - return True - def getlenbound(self, mode): from rpython.jit.metainterp.optimizeopt import intutils diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -801,6 +801,27 @@ return self.execute_with_descr(rop.RAW_LOAD_F, arraydescr, addrbox, offsetbox) + def _remove_symbolics(self, c): + if not we_are_translated(): + from rpython.rtyper.lltypesystem import ll2ctypes + assert isinstance(c, ConstInt) + c = ConstInt(ll2ctypes.lltype2ctypes(c.value)) + return c + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_i(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_I, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + + @arguments("box", "box", "box", "box", "box") + def opimpl_gc_load_indexed_f(self, addrbox, indexbox, + scalebox, baseofsbox, bytesbox): + return self.execute(rop.GC_LOAD_INDEXED_F, addrbox, indexbox, + self._remove_symbolics(scalebox), + self._remove_symbolics(baseofsbox), bytesbox) + @arguments("box") def opimpl_hint_force_virtualizable(self, box): self.metainterp.gen_store_back_in_vable(box) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1212,8 +1212,12 @@ '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- # same paramters as GC_LOAD, but one additional for 
the value to store - # note that the itemsize is not signed! + # note that the itemsize is not signed (always > 0) # (gcptr, index, value, [scale, base_offset,] itemsize) + # invariants for GC_STORE: index is constant, but can be large + # invariants for GC_STORE_INDEXED: index is a non-constant box; + # scale is a constant; + # base_offset is a small constant 'GC_STORE/4d/n', 'GC_STORE_INDEXED/6d/n', diff --git a/rpython/jit/metainterp/test/test_strstorage.py b/rpython/jit/metainterp/test/test_strstorage.py --- a/rpython/jit/metainterp/test/test_strstorage.py +++ b/rpython/jit/metainterp/test/test_strstorage.py @@ -19,7 +19,7 @@ res = self.interp_operations(f, [], supports_singlefloats=True) # kind = getkind(TYPE)[0] # 'i' or 'f' - self.check_operations_history({'getarrayitem_gc_%s' % kind: 1, + self.check_operations_history({'gc_load_indexed_%s' % kind: 1, 'finish': 1}) # if TYPE == lltype.SingleFloat: @@ -29,8 +29,8 @@ return longlong.int2singlefloat(res) return res - def str_storage_supported(self, TYPE): - py.test.skip('this is not a JIT test') + #def str_storage_supported(self, TYPE): + # py.test.skip('this is not a JIT test') def test_force_virtual_str_storage(self): byteorder = sys.byteorder @@ -48,6 +48,6 @@ 'strsetitem': 1, # str forcing 'call_pure_r': 1, # str forcing (copystrcontent) 'guard_no_exception': 1, # str forcing - 'getarrayitem_gc_i': 1, # str_storage_getitem + 'gc_load_indexed_i': 1, # str_storage_getitem 'finish': 1 }) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -114,6 +114,8 @@ specialize = _Specialize() +NOT_CONSTANT = object() # to use in enforceargs() + def enforceargs(*types_, **kwds): """ Decorate a function with forcing of RPython-level types on arguments. None means no enforcing. 
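A minimal usage sketch of the new NOT_CONSTANT marker, modelled on test_enforceargs_not_constant further down in this patch and on the rposix.open change that follows; the function and argument names here are illustrative only, not part of the patch. Unlike None, which disables enforcing for that argument entirely, NOT_CONSTANT asks the annotator not to annotate the argument as a compile-time constant, even if every caller passes a literal:

    from rpython.rlib.objectmodel import enforceargs, NOT_CONSTANT

    @enforceargs(NOT_CONSTANT, int, typecheck=False)
    def open_like(path, flags):
        # 'path' must not collapse to a constant annotation; 'flags' is
        # forced to be an int
        return flags

    def entry_point():
        # the annotator only ever sees literals here, yet the annotation
        # of 'path' stays non-constant
        return open_like("/dev/urandom", 0)
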
diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -9,7 +9,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.rarithmetic import intmask, widen from rpython.rlib.objectmodel import ( - specialize, enforceargs, register_replacement_for) + specialize, enforceargs, register_replacement_for, NOT_CONSTANT) from rpython.rlib.signature import signature from rpython.rlib import types from rpython.annotator.model import s_Str0 @@ -415,7 +415,7 @@ @replace_os_function('open') @specialize.argtype(0) - at enforceargs(None, int, int, typecheck=False) + at enforceargs(NOT_CONSTANT, int, int, typecheck=False) def open(path, flags, mode): if _prefer_unicode(path): fd = c_wopen(_as_unicode0(path), flags, mode) diff --git a/rpython/rlib/rstruct/nativefmttable.py b/rpython/rlib/rstruct/nativefmttable.py --- a/rpython/rlib/rstruct/nativefmttable.py +++ b/rpython/rlib/rstruct/nativefmttable.py @@ -11,7 +11,6 @@ from rpython.rlib.rstruct.standardfmttable import native_is_bigendian from rpython.rlib.rstruct.error import StructError from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.strstorage import str_storage_getitem from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/rlib/rstruct/standardfmttable.py b/rpython/rlib/rstruct/standardfmttable.py --- a/rpython/rlib/rstruct/standardfmttable.py +++ b/rpython/rlib/rstruct/standardfmttable.py @@ -12,7 +12,7 @@ from rpython.rlib.rstruct import ieee from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.strstorage import str_storage_getitem, str_storage_supported +from rpython.rlib.strstorage import str_storage_getitem from rpython.rlib import rarithmetic from rpython.rtyper.lltypesystem import rffi @@ -185,13 +185,14 @@ data = fmtiter.read(size) fmtiter.appendobj(ieee.unpack_float(data, fmtiter.bigendian)) return - if not str_storage_supported(TYPE): - # this happens e.g. on win32 and ARM32: we cannot read the string - # content as an array of doubles because it's not properly - # aligned. But we can read a longlong and convert to float - assert TYPE == rffi.DOUBLE - assert rffi.sizeof(TYPE) == 8 - return unpack_longlong2float(fmtiter) + ## XXX check if the following code is still needed + ## if not str_storage_supported(TYPE): + ## # this happens e.g. on win32 and ARM32: we cannot read the string + ## # content as an array of doubles because it's not properly + ## # aligned. But we can read a longlong and convert to float + ## assert TYPE == rffi.DOUBLE + ## assert rffi.sizeof(TYPE) == 8 + ## return unpack_longlong2float(fmtiter) try: # fast path val = unpack_fastpath(TYPE)(fmtiter) @@ -246,7 +247,7 @@ @specialize.argtype(0) def unpack_int_fastpath_maybe(fmtiter): - if fmtiter.bigendian != native_is_bigendian or not str_storage_supported(TYPE): + if fmtiter.bigendian != native_is_bigendian or not native_is_ieee754: ## or not str_storage_supported(TYPE): return False try: intvalue = unpack_fastpath(TYPE)(fmtiter) diff --git a/rpython/rlib/rurandom.py b/rpython/rlib/rurandom.py --- a/rpython/rlib/rurandom.py +++ b/rpython/rlib/rurandom.py @@ -86,27 +86,29 @@ else: # Posix implementation def init_urandom(): """NOT_RPYTHON - Return an array of one int, initialized to 0. 
- It is filled automatically the first time urandom() is called. """ - return lltype.malloc(rffi.CArray(lltype.Signed), 1, - immortal=True, zero=True) + return None def urandom(context, n): "Read n bytes from /dev/urandom." result = '' if n == 0: return result - if not context[0]: - context[0] = os.open("/dev/urandom", os.O_RDONLY, 0777) - while n > 0: - try: - data = os.read(context[0], n) - except OSError, e: - if e.errno != errno.EINTR: - raise - data = '' - result += data - n -= len(data) + # XXX should somehow cache the file descriptor. It's a mess. + # CPython has a 99% solution and hopes for the remaining 1% + # not to occur. For now, we just don't cache the file + # descriptor (any more... 6810f401d08e). + fd = os.open("/dev/urandom", os.O_RDONLY, 0777) + try: + while n > 0: + try: + data = os.read(fd, n) + except OSError, e: + if e.errno != errno.EINTR: + raise + data = '' + result += data + n -= len(data) + finally: + os.close(fd) return result - diff --git a/rpython/rlib/strstorage.py b/rpython/rlib/strstorage.py --- a/rpython/rlib/strstorage.py +++ b/rpython/rlib/strstorage.py @@ -9,54 +9,31 @@ # rstr.py:copy_string_contents), which has no chance to work during # tracing # -# 2. use llop.raw_load: despite the name, llop.raw_load DOES support reading -# from GC pointers. However: -# -# a. we would like to use a CompositeOffset as the offset (using the -# same logic as in rstr.py:_get_raw_str_buf), but this is not (yet) -# supported before translation: it works only if you pass an actual -# integer -# -# b. raw_load from a GC pointer is not (yet) supported by the -# JIT. There are plans to introduce a gc_load operation: when it -# will be there, we could fix the issue above and actually use it to -# implement str_storage_getitem -# -# 3. the actual solution: cast rpy_string to a GcStruct which has the very +# 2. cast rpy_string to a GcStruct which has the very # same layout, with the only difference that its 'chars' field is no # longer an Array(Char) but e.e. an Array(Signed). Then, we just need to -# read the appropriate index into the array +# read the appropriate index into the array. To support this solution, +# the JIT's optimizer needed a few workarounds. This was removed. +# +# 3. use the newly introduced 'llop.gc_load_indexed'. +# -from rpython.rtyper.lltypesystem import lltype, rffi, llmemory -from rpython.rtyper.lltypesystem.rstr import STR, _get_raw_str_buf + +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.annlowlevel import llstr -from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.objectmodel import specialize - at specialize.memo() -def _rpy_string_as_type(TP): - # sanity check that STR is actually what we think it is - assert STR._flds == { - 'hash': lltype.Signed, - 'chars': lltype.Array(lltype.Char, hints={'immutable': True}) - } - STR_AS_TP = lltype.GcStruct('rpy_string_as_%s' % TP, - ('hash', lltype.Signed), - ('chars', lltype.Array(TP, hints={'immutable': True}))) - return STR_AS_TP - - at specialize.arg(0) -def str_storage_supported(TP): - # on some architectures (e.g. win32 and arm32) an array of longlongs needs - # to be aligned at 8 bytes boundaries, so we cannot safely cast from STR - # to STR_AS_TP. 
In that case, we str_storage_getitem is simply not - # supported - return rffi.sizeof(TP) <= rffi.sizeof(lltype.Signed) @specialize.ll() -def str_storage_getitem(TP, s, index): - assert str_storage_supported(TP) # sanity check - STR_AS_TP = _rpy_string_as_type(TP) +def str_storage_getitem(TP, s, byte_offset): + # WARNING: the 'byte_offset' is, as its name says, measured in bytes; + # however, it should be aligned for TP, otherwise on some platforms this + # code will crash! lls = llstr(s) - str_as_tp = rffi.cast(lltype.Ptr(STR_AS_TP), lls) - index = index / rffi.sizeof(TP) - return str_as_tp.chars[index] + base_ofs = (llmemory.offsetof(STR, 'chars') + + llmemory.itemoffsetof(STR.chars, 0)) + scale_factor = llmemory.sizeof(lltype.Char) + return llop.gc_load_indexed(TP, lls, byte_offset, + scale_factor, base_ofs) diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -4,7 +4,7 @@ r_dict, UnboxedValue, Symbolic, compute_hash, compute_identity_hash, compute_unique_id, current_object_addr_as_int, we_are_translated, prepare_dict_update, reversed_dict, specialize, enforceargs, newlist_hint, - resizelist_hint, is_annotation_constant, always_inline, + resizelist_hint, is_annotation_constant, always_inline, NOT_CONSTANT, iterkeys_with_hash, iteritems_with_hash, contains_with_hash, setitem_with_hash, getitem_with_hash, delitem_with_hash, import_from_mixin) from rpython.translator.translator import TranslationContext, graphof @@ -529,6 +529,18 @@ TYPES = [v.concretetype for v in graph.getargs()] assert TYPES == [lltype.Signed, lltype.Float] +def test_enforceargs_not_constant(): + from rpython.translator.translator import TranslationContext, graphof + @enforceargs(NOT_CONSTANT) + def f(a): + return a + def f42(): + return f(42) + t = TranslationContext() + a = t.buildannotator() + s = a.build_types(f42, []) + assert not hasattr(s, 'const') + def getgraph(f, argtypes): from rpython.translator.translator import TranslationContext, graphof diff --git a/rpython/rlib/test/test_strstorage.py b/rpython/rlib/test/test_strstorage.py --- a/rpython/rlib/test/test_strstorage.py +++ b/rpython/rlib/test/test_strstorage.py @@ -2,7 +2,7 @@ import sys import struct from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib.strstorage import str_storage_getitem, str_storage_supported +from rpython.rlib.strstorage import str_storage_getitem from rpython.rlib.rarithmetic import r_singlefloat from rpython.rtyper.test.tool import BaseRtypingTest @@ -10,14 +10,14 @@ class BaseStrStorageTest: - def test_str_getitem_supported(self): - if IS_32BIT: - expected = False - else: - expected = True - # - assert self.str_storage_supported(rffi.LONGLONG) == expected - assert self.str_storage_supported(rffi.DOUBLE) == expected + ## def test_str_getitem_supported(self): + ## if IS_32BIT: + ## expected = False + ## else: + ## expected = True + ## # + ## assert self.str_storage_supported(rffi.LONGLONG) == expected + ## assert self.str_storage_supported(rffi.DOUBLE) == expected def test_signed(self): buf = struct.pack('@ll', 42, 43) @@ -34,8 +34,8 @@ assert int(x) == 43 def test_float(self): - if not str_storage_supported(lltype.Float): - py.test.skip('str_storage_getitem(lltype.Float) not supported on this machine') + ## if not str_storage_supported(lltype.Float): + ## py.test.skip('str_storage_getitem(lltype.Float) not supported on this machine') buf = struct.pack('@dd', 12.3, 45.6) size = 
struct.calcsize('@d') assert self.str_storage_getitem(lltype.Float, buf, 0) == 12.3 @@ -52,20 +52,45 @@ class TestDirect(BaseStrStorageTest): - def str_storage_supported(self, TYPE): - return str_storage_supported(TYPE) + ## def str_storage_supported(self, TYPE): + ## return str_storage_supported(TYPE) def str_storage_getitem(self, TYPE, buf, offset): return str_storage_getitem(TYPE, buf, offset) class TestRTyping(BaseStrStorageTest, BaseRtypingTest): - def str_storage_supported(self, TYPE): - def fn(): - return str_storage_supported(TYPE) - return self.interpret(fn, []) + ## def str_storage_supported(self, TYPE): + ## def fn(): + ## return str_storage_supported(TYPE) + ## return self.interpret(fn, []) def str_storage_getitem(self, TYPE, buf, offset): def fn(offset): return str_storage_getitem(TYPE, buf, offset) return self.interpret(fn, [offset]) + + +class TestCompiled(BaseStrStorageTest): + cache = {} + + def str_storage_getitem(self, TYPE, buf, offset): + if TYPE not in self.cache: + from rpython.translator.c.test.test_genc import compile + + assert isinstance(TYPE, lltype.Primitive) + if TYPE in (lltype.Float, lltype.SingleFloat): + TARGET_TYPE = lltype.Float + else: + TARGET_TYPE = lltype.Signed + + def llf(buf, offset): + x = str_storage_getitem(TYPE, buf, offset) + return lltype.cast_primitive(TARGET_TYPE, x) + + fn = compile(llf, [str, int]) + self.cache[TYPE] = fn + # + fn = self.cache[TYPE] + x = fn(buf, offset) + return lltype.cast_primitive(TYPE, x) diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -902,6 +902,14 @@ llobj = ctypes.sizeof(get_ctypes_type(llobj.TYPE)) * llobj.repeat elif isinstance(llobj, ComputedIntSymbolic): llobj = llobj.compute_fn() + elif isinstance(llobj, llmemory.CompositeOffset): + llobj = sum([lltype2ctypes(c) for c in llobj.offsets]) + elif isinstance(llobj, llmemory.FieldOffset): + CSTRUCT = get_ctypes_type(llobj.TYPE) + llobj = getattr(CSTRUCT, llobj.fldname).offset + elif isinstance(llobj, llmemory.ArrayItemsOffset): + CARRAY = get_ctypes_type(llobj.TYPE) + llobj = CARRAY.items.offset else: raise NotImplementedError(llobj) # don't know about symbolic value diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -417,6 +417,7 @@ 'raw_load': LLOp(sideeffects=False, canrun=True), From pypy.commits at gmail.com Thu Dec 31 11:50:16 2015 From: pypy.commits at gmail.com (sbauman) Date: Thu, 31 Dec 2015 08:50:16 -0800 (PST) Subject: [pypy-commit] pypy remove-getfield-pure: Merge with default Message-ID: <56855cc8.2815c20a.d8200.ffffe02c@mx.google.com> Author: Spenser Andrew Bauman Branch: remove-getfield-pure Changeset: r81513:3e4151de2899 Date: 2015-12-23 20:29 -0500 http://bitbucket.org/pypy/pypy/changeset/3e4151de2899/ Log: Merge with default diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -29,4 +29,4 @@ release/ !pypy/tool/release/ rpython/_cache/ -__pycache__/ +.cache/ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,6 +44,9 @@ .. branch: fix-setslice-can-resize +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + .. 
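
The point of the tp_getattro changes above is that attribute lookup on instances of a cpyext-exposed class reaches the C level through that slot. A rough pure-Python rendering of the behaviour that test_tp_getattro exercises (illustrative only; the real dispatch happens on the C side through the newly inherited or default slot):

    class C(object):
        def __init__(self):
            self.attr1 = 123

    obj = C()
    # tp_getattro is the C-level counterpart of type(obj).__getattribute__;
    # with the slot wired up, both lookups below agree, which is what the C
    # test asserts via obj->ob_type->tp_getattro(obj, name).
    assert type(obj).__getattribute__(obj, "attr1") == 123
    assert getattr(obj, "attr1") == 123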

From pypy.commits at gmail.com Thu Dec 31 11:50:16 2015
From: pypy.commits at gmail.com (sbauman)
Date: Thu, 31 Dec 2015 08:50:16 -0800 (PST)
Subject: [pypy-commit] pypy remove-getfield-pure: Merge with default
Message-ID: <56855cc8.2815c20a.d8200.ffffe02c@mx.google.com>

Author: Spenser Andrew Bauman
Branch: remove-getfield-pure
Changeset: r81513:3e4151de2899
Date: 2015-12-23 20:29 -0500
http://bitbucket.org/pypy/pypy/changeset/3e4151de2899/

Log: Merge with default

diff --git a/.gitignore b/.gitignore
--- a/.gitignore
+++ b/.gitignore
@@ -29,4 +29,4 @@
 release/
 !pypy/tool/release/
 rpython/_cache/
-__pycache__/
+.cache/
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -44,6 +44,9 @@
 .. branch: fix-setslice-can-resize
 
+Make rlist's ll_listsetslice() able to resize the target list to help
+simplify objspace/std/listobject.py. Was issue #2196.
+
 .. branch: anntype2
 
 A somewhat random bunch of changes and fixes following up on branch
 'anntype'. Highlights:
@@ -83,10 +86,18 @@
 Trivial cleanups in flowspace.operation : fix comment & duplicated method
 
 .. branch: test-AF_NETLINK
+
+Add a test for pre-existing AF_NETLINK support. Was part of issue #1942.
+
 .. branch: small-cleanups-misc
+
+Trivial misc cleanups: typo, whitespace, obsolete comments
+
 .. branch: cpyext-slotdefs
 .. branch: fix-missing-canraise
+.. branch: whatsnew
 
 .. branch: fix-2211
 
-Fix the cryptic exception message when attempting to use extended slicing in rpython
+Fix the cryptic exception message when attempting to use extended slicing
+in rpython. Was issue #2211.
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -380,6 +380,17 @@
             space.call_function(delattr_fn, w_self, w_name)
             return 0
         api_func = slot_tp_setattro.api_func
+    elif name == 'tp_getattro':
+        getattr_fn = w_type.getdictvalue(space, '__getattribute__')
+        if getattr_fn is None:
+            return
+
+        @cpython_api([PyObject, PyObject], PyObject,
+                     error=lltype.nullptr(rffi.VOIDP.TO), external=True)
+        @func_renamer("cpyext_tp_getattro_%s" % (typedef.name,))
+        def slot_tp_getattro(space, w_self, w_name):
+            return space.call_function(getattr_fn, w_self, w_name)
+        api_func = slot_tp_getattro.api_func
     else:
         return
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -385,12 +385,53 @@
                      PyErr_SetString(PyExc_ValueError, "recursive tp_setattro");
                      return NULL;
                  }
+                 if (!args->ob_type->tp_getattro)
+                 {
+                     PyErr_SetString(PyExc_ValueError, "missing tp_getattro");
+                     return NULL;
+                 }
+                 if (args->ob_type->tp_getattro ==
+                     args->ob_type->tp_base->tp_getattro)
+                 {
+                     PyErr_SetString(PyExc_ValueError, "recursive tp_getattro");
+                     return NULL;
+                 }
                  Py_RETURN_TRUE;
              '''
              )
            ])
         assert module.test_type(type(None))
 
+    def test_tp_getattro(self):
+        module = self.import_extension('foo', [
+            ("test_tp_getattro", "METH_VARARGS",
+             '''
+                 PyObject *obj = PyTuple_GET_ITEM(args, 0);
+                 PyIntObject *value = PyTuple_GET_ITEM(args, 1);
+                 if (!obj->ob_type->tp_getattro)
+                 {
+                     PyErr_SetString(PyExc_ValueError, "missing tp_getattro");
+                     return NULL;
+                 }
+                 PyObject *name = PyString_FromString("attr1");
+                 PyIntObject *attr1 = obj->ob_type->tp_getattro(obj, name);
+                 if (attr1->ob_ival != value->ob_ival)
+                 {
+                     PyErr_SetString(PyExc_ValueError,
+                                     "tp_getattro returned wrong value");
+                     return NULL;
+                 }
+                 Py_DECREF(name);
+                 Py_DECREF(attr1);
+                 Py_RETURN_TRUE;
+             '''
+             )
+            ])
+        class C:
+            def __init__(self):
+                self.attr1 = 123
+        assert module.test_tp_getattro(C(), 123)
+
     def test_nb_int(self):
         module = self.import_extension('foo', [
             ("nb_int", "METH_O",
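At the Python level, the slot added in slotdefs.py above forwards attribute
lookups to the type's __getattribute__; a minimal model of what the new test
exercises (the names below are made up for illustration):

    def model_tp_getattro(obj, name):
        # roughly what slot_tp_getattro does: call the type's __getattribute__
        return type(obj).__getattribute__(obj, name)

    class C(object):
        def __init__(self):
            self.attr1 = 123

    assert model_tp_getattro(C(), 'attr1') == 123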
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -582,6 +582,8 @@
             pto.c_tp_free = base.c_tp_free
         if not pto.c_tp_setattro:
             pto.c_tp_setattro = base.c_tp_setattro
+        if not pto.c_tp_getattro:
+            pto.c_tp_getattro = base.c_tp_getattro
     finally:
         Py_DecRef(space, base_pyo)
@@ -651,6 +653,12 @@
             PyObject_GenericSetAttr.api_func.functype,
             PyObject_GenericSetAttr.api_func.get_wrapper(space))
+    if not pto.c_tp_getattro:
+        from pypy.module.cpyext.object import PyObject_GenericGetAttr
+        pto.c_tp_getattro = llhelper(
+            PyObject_GenericGetAttr.api_func.functype,
+            PyObject_GenericGetAttr.api_func.get_wrapper(space))
+
     if w_obj.is_cpytype():
         Py_DecRef(space, pto.c_tp_dict)
         w_dict = w_obj.getdict(space)